Merge git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6:
  cifs: add cruid= mount option
  cifs: cFYI the entire error code in map_smb_to_linux_error
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 03641a0..8906648 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -268,10 +268,6 @@
 !Finclude/net/mac80211.h ieee80211_ops
 !Finclude/net/mac80211.h ieee80211_alloc_hw
 !Finclude/net/mac80211.h ieee80211_register_hw
-!Finclude/net/mac80211.h ieee80211_get_tx_led_name
-!Finclude/net/mac80211.h ieee80211_get_rx_led_name
-!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
-!Finclude/net/mac80211.h ieee80211_get_radio_led_name
 !Finclude/net/mac80211.h ieee80211_unregister_hw
 !Finclude/net/mac80211.h ieee80211_free_hw
       </chapter>
@@ -382,6 +378,23 @@
         </para>
       </partintro>
 
+      <chapter id="led-support">
+        <title>LED support</title>
+        <para>
+         Mac80211 supports various ways of blinking LEDs. Wherever possible,
+         device LEDs should be exposed as LED class devices and hooked up to
+         the appropriate trigger, which will then be triggered appropriately
+         by mac80211.
+        </para>
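+        <para>
+         As a minimal, hypothetical sketch, a driver can tie one of its LEDs
+         to the TX trigger by using the trigger name as the default trigger
+         of its LED class device (drv_init_tx_led and the "drv::tx" name are
+         illustrative, not part of the API):
+        </para>
+<programlisting>
+static void drv_init_tx_led(struct ieee80211_hw *hw, struct led_classdev *led)
+{
+	/* Blink this LED whenever mac80211 transmits a frame. */
+	led->name = "drv::tx";
+	led->default_trigger = ieee80211_get_tx_led_name(hw);
+	led_classdev_register(wiphy_dev(hw->wiphy), led);
+}
+</programlisting>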
+!Finclude/net/mac80211.h ieee80211_get_tx_led_name
+!Finclude/net/mac80211.h ieee80211_get_rx_led_name
+!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
+!Finclude/net/mac80211.h ieee80211_get_radio_led_name
+!Finclude/net/mac80211.h ieee80211_tpt_blink
+!Finclude/net/mac80211.h ieee80211_tpt_led_trigger_flags
+!Finclude/net/mac80211.h ieee80211_create_tpt_led_trigger
+      </chapter>
+
       <chapter id="hardware-crypto-offload">
         <title>Hardware crypto acceleration</title>
 !Pinclude/net/mac80211.h Hardware crypto acceleration
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 977d891..4471a41 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -19,6 +19,8 @@
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
+	struct vfsmount *(*d_automount)(struct path *path);
+	int (*d_manage)(struct dentry *, bool);
 
 locking rules:
 		rename_lock	->d_lock	may block	rcu-walk
@@ -29,6 +31,8 @@
 d_release:	no		no		yes		no
 d_iput:		no		no		yes		no
 d_dname:	no		no		no		no
+d_automount:	no		no		yes		no
+d_manage:	no		no		yes (ref-walk)	maybe
 
 --------------------------- inode_operations --------------------------- 
 prototypes:
@@ -56,7 +60,6 @@
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
-	long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 
 locking rules:
@@ -84,7 +87,6 @@
 listxattr:	no
 removexattr:	yes
 truncate_range:	yes
-fallocate:	no
 fiemap:		no
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
@@ -343,7 +345,6 @@
 	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *); /* break_lease callback */
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
 	int (*fl_change)(struct file_lock **, int);
 
 locking rules:
@@ -353,7 +354,6 @@
 fl_grant:		no		no
 fl_release_private:	maybe		no
 fl_break:		yes		no
-fl_mylease:		yes		no
 fl_change		yes		no
 
 --------------------------- buffer_head -----------------------------------
@@ -435,6 +435,7 @@
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
 			size_t, unsigned int);
 	int (*setlease)(struct file *, long, struct file_lock **);
+	long (*fallocate)(struct file *, int, loff_t, loff_t);
 };
 
 locking rules:
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index cae6d27..94cf97b 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -864,6 +864,8 @@
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *, char *, int);
+	struct vfsmount *(*d_automount)(struct path *);
+	int (*d_manage)(struct dentry *, bool, bool);
 };
 
   d_revalidate: called when the VFS needs to revalidate a dentry. This
@@ -930,6 +932,47 @@
 	at the end of the buffer, and returns a pointer to the first char.
 	dynamic_dname() helper function is provided to take care of this.
 
+  d_automount: called when an automount dentry is to be traversed (optional).
+	This should create a new VFS mount record and return the record to the
+	caller.  The caller is supplied with a path parameter giving the
+	automount directory to describe the automount target and the parent
+	VFS mount record to provide inheritable mount parameters.  NULL should
+	be returned if someone else managed to make the automount first.  If
+	the vfsmount creation fails, then an error code should be returned.
+	If -EISDIR is returned, then the directory will be treated as an
+	ordinary directory and returned to pathwalk to continue walking.
+
+	If a vfsmount is returned, the caller will attempt to mount it on the
+	mountpoint and will remove the vfsmount from its expiration list in
+	the case of failure.  The vfsmount should be returned with 2 refs on
+	it to prevent automatic expiration - the caller will clean up the
+	additional ref.
+
+	This function is only used if DCACHE_NEED_AUTOMOUNT is set on the
+	dentry.  This is set by __d_instantiate() if S_AUTOMOUNT is set on the
+	inode being added.
+
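+	As a minimal, hypothetical sketch (myfs_build_mount() is an
+	illustrative helper, not a real API):
+
+static struct vfsmount *myfs_d_automount(struct path *path)
+{
+	struct vfsmount *mnt;
+
+	mnt = myfs_build_mount(path);		/* construct the target mount */
+	if (IS_ERR(mnt))
+		return mnt;			/* error code aborts pathwalk */
+	if (!mnt)
+		return NULL;			/* raced: already mounted */
+
+	/* Return with 2 refs held to prevent immediate expiration. */
+	mntget(mnt);
+	return mnt;
+}
+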
+  d_manage: called to allow the filesystem to manage the transition from a
+	dentry (optional).  This allows autofs, for example, to hold up clients
+	waiting to explore behind a 'mountpoint' whilst letting the daemon go
+	past and construct the subtree there.  0 should be returned to let the
+	calling process continue.  -EISDIR can be returned to tell pathwalk to
+	use this directory as an ordinary directory and to ignore anything
+	mounted on it and not to check the automount flag.  Any other error
+	code will abort pathwalk completely.
+
+	If the 'mounting_here' parameter is true, then namespace_sem is being
+	held by the caller and the function should not initiate any mounts or
+	unmounts that it will then wait for.
+
+	If the 'rcu_walk' parameter is true, then the caller is doing a
+	pathwalk in RCU-walk mode.  Sleeping is not permitted in this mode,
+	and the caller can be asked to leave it and call again by returning
+	-ECHILD.
+
+	This function is only used if DCACHE_MANAGE_TRANSIT is set on the
+	dentry being transited from.
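+
+	As a minimal, hypothetical sketch (myfs_ready_wq and
+	myfs_subtree_ready() are illustrative, not a real API):
+
+static int myfs_d_manage(struct dentry *dentry, bool mounting_here,
+			 bool rcu_walk)
+{
+	if (rcu_walk)
+		return -ECHILD;	/* cannot sleep; retry in ref-walk mode */
+	if (mounting_here)
+		return 0;	/* namespace_sem held; do not wait on mounts */
+
+	/* Block until the daemon has constructed the subtree. */
+	return wait_event_interruptible(myfs_ready_wq,
+					myfs_subtree_ready(dentry));
+}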
+
 Example :
 
 static char *pipefs_dname(struct dentry *dent, char *buffer, int buflen)
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
new file mode 100755
index 0000000..dbeb8a0
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.py
@@ -0,0 +1,1094 @@
+#!/usr/bin/python
+# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
+#
+# Copyright (c) 2010 Rising Tide Systems
+# Copyright (c) 2010 Linux-iSCSI.org
+#
+# Author: nab@kernel.org
+#
+import os, sys
+import subprocess as sub
+import string
+import re
+import optparse
+
+tcm_dir = ""
+
+fabric_ops = []
+fabric_mod_dir = ""
+fabric_mod_port = ""
+fabric_mod_init_port = ""
+
+def tcm_mod_err(msg):
+	print msg
+	sys.exit(1)
+
+def tcm_mod_create_module_subdir(fabric_mod_dir_var):
+
+	if os.path.isdir(fabric_mod_dir_var) == True:
+		return 1
+
+	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
+	ret = os.mkdir(fabric_mod_dir_var)
+	if ret:
+		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
+
+	return
+
+def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
+	buf += "	u64 nport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
+	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* FC lport target portal group tag for TCM */\n"
+	buf += "	u16 lport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
+	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_lport {\n"
+	buf += "	/* SCSI protocol the lport is providing */\n"
+	buf += "	u8 lport_proto_id;\n"
+	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
+	buf += "	u64 lport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
+	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
+	buf += "	struct se_wwn lport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "lport"
+	fabric_mod_init_port = "nport"
+
+	return
+
+def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
+	buf += "	u64 iport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
+	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* SAS port target portal group tag for TCM */\n"
+	buf += "	u16 tport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
+	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tport {\n"
+	buf += "	/* SCSI protocol the tport is providing */\n"
+	buf += "	u8 tport_proto_id;\n"
+	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
+	buf += "	u64 tport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
+	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
+	buf += "	struct se_wwn tport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "tport"
+	fabric_mod_init_port = "iport"
+
+	return
+
+def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* ASCII formatted InitiatorName */\n"
+	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* iSCSI target portal group tag for TCM */\n"
+	buf += "	u16 tport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
+	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tport {\n"
+	buf += "	/* SCSI protocol the tport is providing */\n"
+	buf += "	u8 tport_proto_id;\n"
+	buf += "	/* ASCII formatted TargetName for IQN */\n"
+	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
+	buf += "	struct se_wwn tport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "tport"
+	fabric_mod_init_port = "iport"
+
+	return
+
+def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
+
+	if proto_ident == "FC":
+		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
+	elif proto_ident == "SAS":
+		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
+	elif proto_ident == "iSCSI":
+		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
+	else:
+		print "Unsupported proto_ident: " + proto_ident
+		sys.exit(1)
+
+	return
+
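+# Emit $FABRIC_MOD_configfs.c: the configfs glue that registers the fabric
+# with TCM core and wires up the make/drop callbacks for WWNs, TPGs and
+# NodeACLs.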
+def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#include <linux/module.h>\n"
+	buf += "#include <linux/moduleparam.h>\n"
+	buf += "#include <linux/version.h>\n"
+	buf += "#include <generated/utsrelease.h>\n"
+	buf += "#include <linux/utsname.h>\n"
+	buf += "#include <linux/init.h>\n"
+	buf += "#include <linux/slab.h>\n"
+	buf += "#include <linux/kthread.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/string.h>\n"
+	buf += "#include <linux/configfs.h>\n"
+	buf += "#include <linux/ctype.h>\n"
+	buf += "#include <asm/unaligned.h>\n\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/target_core_transport.h>\n"
+	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric_configfs.h>\n"
+	buf += "#include <target/target_core_fabric_lib.h>\n"
+	buf += "#include <target/target_core_device.h>\n"
+	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_configfs.h>\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/configfs_macros.h>\n\n"
+	buf += "#include <" + fabric_mod_name + "_base.h>\n"
+	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+
+	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
+	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
+
+	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
+	buf += "	struct se_portal_group *se_tpg,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
+	buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	u64 wwpn = 0;\n"
+
+	buf += "	u32 nexus_depth;\n\n"
+	buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
+	buf += "		return ERR_PTR(-EINVAL); */\n"
+	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
+	buf += "	if (!(se_nacl_new))\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
+	buf += "	nexus_depth = 1;\n"
+	buf += "	/*\n"
+	buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
+	buf += "	 * when converting a NodeACL from demo mode -> explict\n"
+	buf += "	 */\n"
+	buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
+	buf += "				name, nexus_depth);\n"
+	buf += "	if (IS_ERR(se_nacl)) {\n"
+	buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
+	buf += "		return se_nacl;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
+	buf += "	 */\n"
+	buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
+
+	buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
+	buf += "	return se_nacl;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
+	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+	buf += "	kfree(nacl);\n"
+	buf += "}\n\n"
+
+	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
+	buf += "	struct se_wwn *wwn,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
+	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
+	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
+	buf += "	unsigned long tpgt;\n"
+	buf += "	int ret;\n\n"
+	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
+	buf += "		return ERR_PTR(-EINVAL);\n"
+	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
+	buf += "		return ERR_PTR(-EINVAL);\n\n"
+	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
+	buf += "	if (!(tpg)) {\n"
+	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "	}\n"
+	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
+	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
+	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
+	buf += "				&tpg->se_tpg, (void *)tpg,\n"
+	buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
+	buf += "	if (ret < 0) {\n"
+	buf += "		kfree(tpg);\n"
+	buf += "		return NULL;\n"
+	buf += "	}\n"
+	buf += "	return &tpg->se_tpg;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+	buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
+	buf += "	core_tpg_deregister(se_tpg);\n"
+	buf += "	kfree(tpg);\n"
+	buf += "}\n\n"
+
+	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
+	buf += "	struct target_fabric_configfs *tf,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	u64 wwpn = 0;\n\n"
+
+	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
+	buf += "		return ERR_PTR(-EINVAL); */\n\n"
+	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
+	buf += "	if (!(" + fabric_mod_port + ")) {\n"
+	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "	}\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
+
+	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
+	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
+	buf += "				struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
+	buf += "	kfree(" + fabric_mod_port + ");\n"
+	buf += "}\n\n"
+	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
+	buf += "	struct target_fabric_configfs *tf,\n"
+	buf += "	char *page)\n"
+	buf += "{\n"
+	buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
+	buf += "		\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
+	buf += "		utsname()->machine);\n"
+	buf += "}\n\n"
+	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
+	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
+	buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
+	buf += "	NULL,\n"
+	buf += "};\n\n"
+
+	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
+	buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
+	buf += "	.get_fabric_proto_ident		= " + fabric_mod_name + "_get_fabric_proto_ident,\n"
+	buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
+	buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
+	buf += "	.tpg_get_default_depth		= " + fabric_mod_name + "_get_default_depth,\n"
+	buf += "	.tpg_get_pr_transport_id	= " + fabric_mod_name + "_get_pr_transport_id,\n"
+	buf += "	.tpg_get_pr_transport_id_len	= " + fabric_mod_name + "_get_pr_transport_id_len,\n"
+	buf += "	.tpg_parse_pr_out_transport_id	= " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
+	buf += "	.tpg_check_demo_mode		= " + fabric_mod_name + "_check_false,\n"
+	buf += "	.tpg_check_demo_mode_cache	= " + fabric_mod_name + "_check_true,\n"
+	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
+	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
+	buf += "	.tpg_alloc_fabric_acl		= " + fabric_mod_name + "_alloc_fabric_acl,\n"
+	buf += "	.tpg_release_fabric_acl		= " + fabric_mod_name + "_release_fabric_acl,\n"
+	buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
+	buf += "	.release_cmd_to_pool		= " + fabric_mod_name + "_release_cmd,\n"
+	buf += "	.release_cmd_direct		= " + fabric_mod_name + "_release_cmd,\n"
+	buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
+	buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
+	buf += "	.stop_session			= " + fabric_mod_name + "_stop_session,\n"
+	buf += "	.fall_back_to_erl0		= " + fabric_mod_name + "_reset_nexus,\n"
+	buf += "	.sess_logged_in			= " + fabric_mod_name + "_sess_logged_in,\n"
+	buf += "	.sess_get_index			= " + fabric_mod_name + "_sess_get_index,\n"
+	buf += "	.sess_get_initiator_sid		= NULL,\n"
+	buf += "	.write_pending			= " + fabric_mod_name + "_write_pending,\n"
+	buf += "	.write_pending_status		= " + fabric_mod_name + "_write_pending_status,\n"
+	buf += "	.set_default_node_attributes	= " + fabric_mod_name + "_set_default_node_attrs,\n"
+	buf += "	.get_task_tag			= " + fabric_mod_name + "_get_task_tag,\n"
+	buf += "	.get_cmd_state			= " + fabric_mod_name + "_get_cmd_state,\n"
+	buf += "	.new_cmd_failure		= " + fabric_mod_name + "_new_cmd_failure,\n"
+	buf += "	.queue_data_in			= " + fabric_mod_name + "_queue_data_in,\n"
+	buf += "	.queue_status			= " + fabric_mod_name + "_queue_status,\n"
+	buf += "	.queue_tm_rsp			= " + fabric_mod_name + "_queue_tm_rsp,\n"
+	buf += "	.get_fabric_sense_len		= " + fabric_mod_name + "_get_fabric_sense_len,\n"
+	buf += "	.set_fabric_sense_len		= " + fabric_mod_name + "_set_fabric_sense_len,\n"
+	buf += "	.is_state_remove		= " + fabric_mod_name + "_is_state_remove,\n"
+	buf += "	.pack_lun			= " + fabric_mod_name + "_pack_lun,\n"
+	buf += "	/*\n"
+	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
+	buf += "	 */\n"
+	buf += "	.fabric_make_wwn		= " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
+	buf += "	.fabric_drop_wwn		= " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
+	buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
+	buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
+	buf += "	.fabric_post_link		= NULL,\n"
+	buf += "	.fabric_pre_unlink		= NULL,\n"
+	buf += "	.fabric_make_np			= NULL,\n"
+	buf += "	.fabric_drop_np			= NULL,\n"
+	buf += "	.fabric_make_nodeacl		= " + fabric_mod_name + "_make_nodeacl,\n"
+	buf += "	.fabric_drop_nodeacl		= " + fabric_mod_name + "_drop_nodeacl,\n"
+	buf += "};\n\n"
+
+	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
+	buf += "{\n"
+	buf += "	struct target_fabric_configfs *fabric;\n"
+	buf += "	int ret;\n\n"
+	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
+	buf += "		\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
+	buf += "		utsname()->machine);\n"
+	buf += "	/*\n"
+	buf += "	 * Register the top level struct config_item_type with TCM core\n"
+	buf += "	 */\n"
+	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+	buf += "	if (!(fabric)) {\n"
+	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
+	buf += "		return -ENOMEM;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
+	buf += "	 */\n"
+	buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
+	buf += "	/*\n"
+	buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
+	buf += "	 */\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
+	buf += "	/*\n"
+	buf += "	 * Register the fabric for use within TCM\n"
+	buf += "	 */\n"
+	buf += "	ret = target_fabric_configfs_register(fabric);\n"
+	buf += "	if (ret < 0) {\n"
+	buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
+	buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
+	buf += "		return ret;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Setup our local pointer to *fabric\n"
+	buf += "	 */\n"
+	buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
+	buf += "	printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
+	buf += "	return 0;\n"
+	buf += "};\n\n"
+	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
+	buf += "{\n"
+	buf += "	if (!(" + fabric_mod_name + "_fabric_configfs))\n"
+	buf += "		return;\n\n"
+	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
+	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
+	buf += "	printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
+	buf += "};\n\n"
+
+	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
+	buf += "{\n"
+	buf += "	int ret;\n\n"
+	buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
+	buf += "	if (ret < 0)\n"
+	buf += "		return ret;\n\n"
+	buf += "	return 0;\n"
+	buf += "};\n\n"
+	buf += "static void " + fabric_mod_name + "_exit(void)\n"
+	buf += "{\n"
+	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
+	buf += "};\n\n"
+
+	buf += "#ifdef MODULE\n"
+	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
+	buf += "MODULE_LICENSE(\"GPL\");\n"
+	buf += "module_init(" + fabric_mod_name + "_init);\n"
+	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
+	buf += "#endif\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	return
+
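+# Scan the fabric ops header and collect each function pointer member of
+# struct target_core_fabric_ops into the global fabric_ops[] list.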
+def tcm_mod_scan_fabric_ops(tcm_dir):
+
+	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
+
+	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
+	process_fo = 0;
+
+	p = open(fabric_ops_api, 'r')
+
+	line = p.readline()
+	while line:
+		# Start collecting once the struct definition opens
+		if process_fo == 0:
+			if re.search('struct target_core_fabric_ops {', line):
+				process_fo = 1
+			line = p.readline()
+			continue
+		# Stop at the struct's closing brace
+		if re.search('};', line):
+			break
+		# Collect each function pointer member
+		if re.search('\(\*', line):
+			fabric_ops.append(line.rstrip())
+
+		line = p.readline()
+
+	p.close()
+	return
+
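+# Emit $FABRIC_MOD_fabric.c and $FABRIC_MOD_fabric.h: NOP stub definitions
+# and prototypes for each fabric op collected by tcm_mod_scan_fabric_ops().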
+def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
+	buf = ""
+	bufi = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
+	print "Writing file: " + fi
+
+	pi = open(fi, 'w')
+	if not pi:
+		tcm_mod_err("Unable to open file: " + fi)
+
+	buf = "#include <linux/slab.h>\n"
+	buf += "#include <linux/kthread.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/list.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/string.h>\n"
+	buf += "#include <linux/ctype.h>\n"
+	buf += "#include <asm/unaligned.h>\n"
+	buf += "#include <scsi/scsi.h>\n"
+	buf += "#include <scsi/scsi_host.h>\n"
+	buf += "#include <scsi/scsi_device.h>\n"
+	buf += "#include <scsi/scsi_cmnd.h>\n"
+	buf += "#include <scsi/libfc.h>\n\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/target_core_transport.h>\n"
+	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric_lib.h>\n"
+	buf += "#include <target/target_core_device.h>\n"
+	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_configfs.h>\n"
+	buf += "#include <" + fabric_mod_name + "_base.h>\n"
+	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+
+	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	return 1;\n"
+	buf += "}\n\n"
+	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
+
+	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	return 0;\n"
+	buf += "}\n\n"
+	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
+
+	total_fabric_ops = len(fabric_ops)
+	i = 0
+
+	while i < total_fabric_ops:
+		fo = fabric_ops[i]
+		i += 1
+#		print "fabric_ops: " + fo
+
+		if re.search('get_fabric_name', fo):
+			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
+			buf += "{\n"
+			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
+			continue
+
+		if re.search('get_fabric_proto_ident', fo):
+			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	u8 proto_id;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+
+			buf += "	}\n\n"
+			buf += "	return proto_id;\n"
+			buf += "}\n\n"
+			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
+
+		if re.search('get_wwn', fo):
+			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
+			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
+
+		if re.search('get_tag', fo):
+			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
+
+		if re.search('get_default_depth', fo):
+			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	return 1;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
+
+		if re.search('get_pr_transport_id\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl,\n"
+			buf += "	struct t10_pr_registration *pr_reg,\n"
+			buf += "	int *format_code,\n"
+			buf += "	unsigned char *buf)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	int ret = 0;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+
+			buf += "	}\n\n"
+			buf += "	return ret;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
+			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
+			bufi += "			int *, unsigned char *);\n"
+
+		if re.search('get_pr_transport_id_len\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl,\n"
+			buf += "	struct t10_pr_registration *pr_reg,\n"
+			buf += "	int *format_code)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	int ret = 0;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+
+
+			buf += "	}\n\n"
+			buf += "	return ret;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
+			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
+			bufi += "			int *);\n"
+
+		if re.search('parse_pr_out_transport_id\)\(', fo):
+			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	const char *buf,\n"
+			buf += "	u32 *out_tid_len,\n"
+			buf += "	char **port_nexus_ptr)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	char *tid = NULL;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+
+			buf += "	}\n\n"
+			buf += "	return tid;\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
+			bufi +=	"			const char *, u32 *, char **);\n"
+
+		if re.search('alloc_fabric_acl\)\(', fo):
+			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
+			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
+			buf += "	if (!(nacl)) {\n"
+			buf += "		printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
+			buf += "		return NULL;\n"
+			buf += "	}\n\n"
+			buf += "	return &nacl->se_node_acl;\n"
+			buf += "}\n\n"
+			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
+
+		if re.search('release_fabric_acl\)\(', fo):
+			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
+			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+			buf += "	kfree(nacl);\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
+			bufi +=	"			struct se_node_acl *);\n"
+
+		if re.search('tpg_get_inst_index\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	return 1;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
+
+		if re.search('release_cmd_to_pool', fo):
+			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
+
+		if re.search('shutdown_session\)\(', fo):
+			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
+
+		if re.search('close_session\)\(', fo):
+			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
+
+		if re.search('stop_session\)\(', fo):
+			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
+
+		if re.search('fall_back_to_erl0\)\(', fo):
+			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
+
+		if re.search('sess_logged_in\)\(', fo):
+			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
+
+		if re.search('sess_get_index\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
+
+		if re.search('write_pending\)\(', fo):
+			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
+
+		if re.search('write_pending_status\)\(', fo):
+			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
+
+		if re.search('set_default_node_attributes\)\(', fo):
+			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
+
+		if re.search('get_task_tag\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
+
+		if re.search('get_cmd_state\)\(', fo):
+			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
+
+		if re.search('new_cmd_failure\)\(', fo):
+			buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
+
+		if re.search('queue_data_in\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
+
+		if re.search('queue_status\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
+
+		if re.search('queue_tm_rsp\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+
+		if re.search('get_fabric_sense_len\)\(', fo):
+			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
+
+		if re.search('set_fabric_sense_len\)\(', fo):
+			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
+
+		if re.search('is_state_remove\)\(', fo):
+			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
+
+		if re.search('pack_lun\)\(', fo):
+			buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
+			buf += "{\n"
+			buf += "	WARN_ON(lun >= 256);\n"
+			buf += "	/* Caller wants this byte-swapped */\n"
+			buf += "	return cpu_to_le64((lun & 0xff) << 8);\n"
+			buf += "}\n\n"
+			bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
+
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	ret = pi.write(bufi)
+	if ret:
+		tcm_mod_err("Unable to write fi: " + fi)
+
+	pi.close()
+	return
+
+def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
+
+	buf = ""
+	f = fabric_mod_dir_var + "/Kbuild"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
+	buf += fabric_mod_name + "-objs			:= " + fabric_mod_name + "_fabric.o \\\n"
+	buf += "					   " + fabric_mod_name + "_configfs.o\n"
+	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ")		+= " + fabric_mod_name + ".o\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+	return
+
+def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
+
+	buf = ""
+	f = fabric_mod_dir_var + "/Kconfig"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "config " + fabric_mod_name.upper() + "\n"
+	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
+	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
+	buf += "	default n\n"
+	buf += "	---help---\n"
+	buf += "	Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+	return
+
+def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
+	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ")	+= " + fabric_mod_name.lower() + "/\n"
+	kbuild = tcm_dir + "/drivers/target/Kbuild"
+
+	f = open(kbuild, 'a')
+	f.write(buf)
+	f.close()
+	return
+
+def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
+	buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
+	kconfig = tcm_dir + "/drivers/target/Kconfig"
+
+	f = open(kconfig, 'a')
+	f.write(buf)
+	f.close()
+	return
+
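+# Drive the generation: create the module directory, emit the base header,
+# scan the fabric ops, then write the fabric stubs, configfs glue, Kbuild
+# and Kconfig, optionally wiring the new module into drivers/target/.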
+def main(modname, proto_ident):
+#	proto_ident = "FC"
+#	proto_ident = "SAS"
+#	proto_ident = "iSCSI"
+
+	tcm_dir = os.getcwd();
+	tcm_dir += "/../../"
+	print "tcm_dir: " + tcm_dir
+	fabric_mod_name = modname
+	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
+	print "Set fabric_mod_name: " + fabric_mod_name
+	print "Set fabric_mod_dir: " + fabric_mod_dir
+	print "Using proto_ident: " + proto_ident
+
+	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
+		print "Unsupported proto_ident: " + proto_ident
+		sys.exit(1)
+
+	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
+	if ret:
+		print "tcm_mod_create_module_subdir() failed because module already exists!"
+		sys.exit(1)
+
+	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_scan_fabric_ops(tcm_dir)
+	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
+
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ")
+	if input == "yes" or input == "y":
+		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
+
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+	if input == "yes" or input == "y":
+		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
+
+	return
+
+parser = optparse.OptionParser()
+parser.add_option('-m', '--modulename', help='Module name', dest='modname',
+		action='store', nargs=1, type='string')
+parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
+		action='store', nargs=1, type='string')
+
+(opts, args) = parser.parse_args()
+
+mandatories = ['modname', 'protoident']
+for m in mandatories:
+	if not opts.__dict__[m]:
+		print "mandatory option is missing\n"
+		parser.print_help()
+		exit(-1)
+
+if __name__ == "__main__":
+
+	main(str(opts.modname), opts.protoident)
diff --git a/Documentation/target/tcm_mod_builder.txt b/Documentation/target/tcm_mod_builder.txt
new file mode 100644
index 0000000..84533d8
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.txt
@@ -0,0 +1,145 @@
+>>>>>>>>>> The TCM v4 fabric module script generator <<<<<<<<<<
+
+Greetings all,
+
+This document is intended to be a mini-HOWTO for using the tcm_mod_builder.py
+script to generate a brand new, functional TCM v4 fabric .ko module of your
+very own that, once built, can immediately be loaded to start accessing the
+new TCM/ConfigFS fabric skeleton, simply by using:
+
+	modprobe $TCM_NEW_MOD
+	mkdir -p /sys/kernel/config/target/$TCM_NEW_MOD
+
+This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following:
+
+	*) Generate new API callers for drivers/target/target_core_fabric_configfs.c logic
+	   ->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg()
+	   ->make_wwn(), ->drop_wwn().  These are generated in $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
+	*) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module
+	   using a skeleton struct target_core_fabric_ops API template.
+	*) Based on user defined T10 Proto_Ident for the new fabric module being built,
+	   the TransportID / Initiator and Target WWPN related handlers for
+	   SPC-3 persistent reservation are automatically generated in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
+	   using drivers/target/target_core_fabric_lib.c logic.
+	*) NOP API calls for all other Data I/O path and fabric dependent attribute logic
+	   in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
+
+tcm_mod_builder.py depends upon the mandatory '-p $PROTO_IDENT' and '-m
+$FABRIC_MOD_NAME' parameters, and actually running the script looks like:
+
+target:/mnt/sdb/lio-core-2.6.git/Documentation/target# python tcm_mod_builder.py -p iSCSI -m tcm_nab5000
+tcm_dir: /mnt/sdb/lio-core-2.6.git/Documentation/target/../../
+Set fabric_mod_name: tcm_nab5000
+Set fabric_mod_dir:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
+Using proto_ident: iSCSI
+Creating fabric_mod_dir:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_base.h
+Using tcm_mod_scan_fabric_ops:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../include/target/target_core_fabric_ops.h
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.c
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.h
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_configfs.c
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kbuild
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kconfig
+Would you like to add tcm_nab5000 to drivers/target/Kbuild..? [yes,no]: yes
+Would you like to add tcm_nab5000 to drivers/target/Kconfig..? [yes,no]: yes
+
+At the end of tcm_mod_builder.py, the script will ask to add the following
+line to drivers/target/Kbuild:
+
+	obj-$(CONFIG_TCM_NAB5000)       += tcm_nab5000/
+
+and the same for drivers/target/Kconfig:
+
+	source "drivers/target/tcm_nab5000/Kconfig"
+
+*) Run 'make menuconfig' and select the new CONFIG_TCM_NAB5000 item:
+
+	<M>   TCM_NAB5000 fabric module
+
+*) Build using 'make modules'; once completed you will have:
+
+target:/mnt/sdb/lio-core-2.6.git# ls -la drivers/target/tcm_nab5000/
+total 1348
+drwxr-xr-x 2 root root   4096 2010-10-05 03:23 .
+drwxr-xr-x 9 root root   4096 2010-10-05 03:22 ..
+-rw-r--r-- 1 root root    282 2010-10-05 03:22 Kbuild
+-rw-r--r-- 1 root root    171 2010-10-05 03:22 Kconfig
+-rw-r--r-- 1 root root     49 2010-10-05 03:23 modules.order
+-rw-r--r-- 1 root root    738 2010-10-05 03:22 tcm_nab5000_base.h
+-rw-r--r-- 1 root root   9096 2010-10-05 03:22 tcm_nab5000_configfs.c
+-rw-r--r-- 1 root root 191200 2010-10-05 03:23 tcm_nab5000_configfs.o
+-rw-r--r-- 1 root root  40504 2010-10-05 03:23 .tcm_nab5000_configfs.o.cmd
+-rw-r--r-- 1 root root   5414 2010-10-05 03:22 tcm_nab5000_fabric.c
+-rw-r--r-- 1 root root   2016 2010-10-05 03:22 tcm_nab5000_fabric.h
+-rw-r--r-- 1 root root 190932 2010-10-05 03:23 tcm_nab5000_fabric.o
+-rw-r--r-- 1 root root  40713 2010-10-05 03:23 .tcm_nab5000_fabric.o.cmd
+-rw-r--r-- 1 root root 401861 2010-10-05 03:23 tcm_nab5000.ko
+-rw-r--r-- 1 root root    265 2010-10-05 03:23 .tcm_nab5000.ko.cmd
+-rw-r--r-- 1 root root    459 2010-10-05 03:23 tcm_nab5000.mod.c
+-rw-r--r-- 1 root root  23896 2010-10-05 03:23 tcm_nab5000.mod.o
+-rw-r--r-- 1 root root  22655 2010-10-05 03:23 .tcm_nab5000.mod.o.cmd
+-rw-r--r-- 1 root root 379022 2010-10-05 03:23 tcm_nab5000.o
+-rw-r--r-- 1 root root    211 2010-10-05 03:23 .tcm_nab5000.o.cmd
+
+*) Load the new module, create a lun_0 configfs group, and add a new TCM Core
+   IBLOCK backstore symlink to the port:
+
+target:/mnt/sdb/lio-core-2.6.git# insmod drivers/target/tcm_nab5000.ko
+target:/mnt/sdb/lio-core-2.6.git# mkdir -p /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0
+target:/mnt/sdb/lio-core-2.6.git# cd /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/
+target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# ln -s /sys/kernel/config/target/core/iblock_0/lvm_test0 nab5000_port
+
+target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# cd -
+target:/mnt/sdb/lio-core-2.6.git# tree /sys/kernel/config/target/nab5000/
+/sys/kernel/config/target/nab5000/
+|-- discovery_auth
+|-- iqn.foo
+|   `-- tpgt_1
+|       |-- acls
+|       |-- attrib
+|       |-- lun
+|       |   `-- lun_0
+|       |       |-- alua_tg_pt_gp
+|       |       |-- alua_tg_pt_offline
+|       |       |-- alua_tg_pt_status
+|       |       |-- alua_tg_pt_write_md
+|       |       `-- nab5000_port -> ../../../../../../target/core/iblock_0/lvm_test0
+|       |-- np
+|       `-- param
+`-- version
+
+target:/mnt/sdb/lio-core-2.6.git# lsmod
+Module                  Size  Used by
+tcm_nab5000             3935  4
+iscsi_target_mod      193211  0
+target_core_stgt        8090  0
+target_core_pscsi      11122  1
+target_core_file        9172  2
+target_core_iblock      9280  1
+target_core_mod       228575  31
+tcm_nab5000,iscsi_target_mod,target_core_stgt,target_core_pscsi,target_core_file,target_core_iblock
+libfc                  73681  0
+scsi_debug             56265  0
+scsi_tgt                8666  1 target_core_stgt
+configfs               20644  2 target_core_mod
+
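+Note: the following teardown sketch is not produced by tcm_mod_builder.py; it
+is a hypothetical example of unwinding the configfs groups created above.
+Configfs groups are removed with rmdir, in the reverse order of creation,
+before unloading the module:
+
+target:/mnt/sdb/lio-core-2.6.git# rm /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/nab5000_port
+target:/mnt/sdb/lio-core-2.6.git# rmdir /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0
+target:/mnt/sdb/lio-core-2.6.git# rmdir /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1
+target:/mnt/sdb/lio-core-2.6.git# rmdir /sys/kernel/config/target/nab5000/iqn.foo
+target:/mnt/sdb/lio-core-2.6.git# rmmod tcm_nab5000
+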
+----------------------------------------------------------------------
+
+Future TODO items:
+
+	*) Add more T10 proto_idents
+	*) Make tcm_mod_dump_fabric_ops() smarter and generate the function
+	   pointer definitions directly from the struct target_core_fabric_ops
+	   members in include/target/target_core_fabric_ops.h.
+
+October 5th, 2010
+Nicholas A. Bellinger <nab@linux-iscsi.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 89e4d4b..1af022e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3684,7 +3684,7 @@
 
 KMEMCHECK
 M:	Vegard Nossum <vegardno@ifi.uio.no>
-M:	Pekka Enberg <penberg@cs.helsinki.fi>
+M:	Pekka Enberg <penberg@kernel.org>
 S:	Maintained
 F:	Documentation/kmemcheck.txt
 F:	arch/x86/include/asm/kmemcheck.h
@@ -5646,7 +5646,7 @@
 
 SLAB ALLOCATOR
 M:	Christoph Lameter <cl@linux-foundation.org>
-M:	Pekka Enberg <penberg@cs.helsinki.fi>
+M:	Pekka Enberg <penberg@kernel.org>
 M:	Matt Mackall <mpm@selenic.com>
 L:	linux-mm@kvack.org
 S:	Maintained
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 943fe69..fc95ee1 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -68,6 +68,9 @@
 	bool
 	default n
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	def_bool y
+
 config GENERIC_HARDIRQS
 	bool
 	default y
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index eda9b90..56ff965 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -37,8 +37,9 @@
  */
 extern inline void __set_hae(unsigned long new_hae)
 {
-	unsigned long flags;
-	local_irq_save(flags);
+	unsigned long flags = swpipl(IPL_MAX);
+
+	barrier();
 
 	alpha_mv.hae_cache = new_hae;
 	*alpha_mv.hae_register = new_hae;
@@ -46,7 +47,8 @@
 	/* Re-read to make sure it was written.  */
 	new_hae = *alpha_mv.hae_register;
 
-	local_irq_restore(flags);
+	setipl(flags);
+	barrier();
 }
 
 extern inline void set_hae(unsigned long new_hae)
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 1ee9b5b..9bb7b858 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -3,8 +3,8 @@
 #
 
 extra-y		:= head.o vmlinux.lds
-EXTRA_AFLAGS	:= $(KBUILD_CFLAGS)
-EXTRA_CFLAGS	:= -Werror -Wno-sign-compare
+asflags-y	:= $(KBUILD_CFLAGS)
+ccflags-y	:= -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
 	    irq_alpha.o signal.o setup.o ptrace.o time.o \
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index fe91298..9ab234f 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,10 +44,11 @@
 
 int irq_select_affinity(unsigned int irq)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
+	if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
 		return 1;
 
 	while (!cpu_possible(cpu) ||
@@ -55,8 +56,8 @@
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
-	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
+	cpumask_copy(desc->affinity, cpumask_of(cpu));
+	get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
 #endif /* CONFIG_SMP */
@@ -67,6 +68,7 @@
 	int j;
 	int irq = *(loff_t *) v;
 	struct irqaction * action;
+	struct irq_desc *desc;
 	unsigned long flags;
 
 #ifdef CONFIG_SMP
@@ -79,8 +81,13 @@
 #endif
 
 	if (irq < ACTUAL_NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
-		action = irq_desc[irq].action;
+		desc = irq_to_desc(irq);
+
+		if (!desc)
+			return 0;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action) 
 			goto unlock;
 		seq_printf(p, "%3d: ", irq);
@@ -90,7 +97,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[irq].chip->name);
+		seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
 		seq_printf(p, "  %c%s",
 			(action->flags & IRQF_DISABLED)?'+':' ',
 			action->name);
@@ -103,7 +110,7 @@
 
 		seq_putc(p, '\n');
 unlock:
-		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (irq == ACTUAL_NR_IRQS) {
 #ifdef CONFIG_SMP
 		seq_puts(p, "IPI: ");
@@ -142,8 +149,10 @@
 	 * handled by some other CPU. (or is disabled)
 	 */
 	static unsigned int illegal_count=0;
+	struct irq_desc *desc = irq_to_desc(irq);
 	
-	if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) {
+	if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
+	    illegal_count < MAX_ILLEGAL_IRQS)) {
 		irq_err_count++;
 		illegal_count++;
 		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
@@ -151,14 +160,14 @@
 		return;
 	}
 
-	irq_enter();
 	/*
-	 * __do_IRQ() must be called with IPL_MAX. Note that we do not
+	 * From here we must proceed with IPL_MAX. Note that we do not
 	 * explicitly enable interrupts afterwards - some MILO PALcode
 	 * (namely LX164 one) seems to have severe problems with RTI
 	 * at IPL 0.
 	 */
 	local_irq_disable();
-	__do_IRQ(irq);
+	irq_enter();
+	generic_handle_irq_desc(irq, desc);
 	irq_exit();
 }
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 4c8bb37..2d0679b 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -219,31 +219,23 @@
  * processed by PALcode, and comes in via entInt vector 1.
  */
 
-static void rtc_enable_disable(unsigned int irq) { }
-static unsigned int rtc_startup(unsigned int irq) { return 0; }
-
 struct irqaction timer_irqaction = {
 	.handler	= timer_interrupt,
 	.flags		= IRQF_DISABLED,
 	.name		= "timer",
 };
 
-static struct irq_chip rtc_irq_type = {
-	.name		= "RTC",
-	.startup	= rtc_startup,
-	.shutdown	= rtc_enable_disable,
-	.enable		= rtc_enable_disable,
-	.disable	= rtc_enable_disable,
-	.ack		= rtc_enable_disable,
-	.end		= rtc_enable_disable,
-};
-
 void __init
 init_rtc_irq(void)
 {
-	irq_desc[RTC_IRQ].status = IRQ_DISABLED;
-	irq_desc[RTC_IRQ].chip = &rtc_irq_type;
-	setup_irq(RTC_IRQ, &timer_irqaction);
+	struct irq_desc *desc = irq_to_desc(RTC_IRQ);
+
+	if (desc) {
+		desc->status |= IRQ_DISABLED;
+		set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+			handle_simple_irq, "RTC");
+		setup_irq(RTC_IRQ, &timer_irqaction);
+	}
 }
 
 /* Dummy irqactions.  */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 83a9ac2..956ea0e 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -69,28 +69,11 @@
 	spin_unlock(&i8259_irq_lock);
 }
 
-unsigned int
-i8259a_startup_irq(unsigned int irq)
-{
-	i8259a_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-void
-i8259a_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		i8259a_enable_irq(irq);
-}
-
 struct irq_chip i8259a_irq_type = {
 	.name		= "XT-PIC",
-	.startup	= i8259a_startup_irq,
-	.shutdown	= i8259a_disable_irq,
-	.enable		= i8259a_enable_irq,
-	.disable	= i8259a_disable_irq,
-	.ack		= i8259a_mask_and_ack_irq,
-	.end		= i8259a_end_irq,
+	.unmask		= i8259a_enable_irq,
+	.mask		= i8259a_disable_irq,
+	.mask_ack	= i8259a_mask_and_ack_irq,
 };
 
 void __init
@@ -107,8 +90,7 @@
 	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
 
 	for (i = 0; i < 16; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].chip = &i8259a_irq_type;
+		set_irq_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
 	}
 
 	setup_irq(2, &cascade);
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 989ce46..2863458 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -40,20 +40,6 @@
 	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
 }
 
-static unsigned int
-pyxis_startup_irq(unsigned int irq)
-{
-	pyxis_enable_irq(irq);
-	return 0;
-}
-
-static void
-pyxis_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		pyxis_enable_irq(irq);
-}
-
 static void
 pyxis_mask_and_ack_irq(unsigned int irq)
 {
@@ -72,12 +58,9 @@
 
 static struct irq_chip pyxis_irq_type = {
 	.name		= "PYXIS",
-	.startup	= pyxis_startup_irq,
-	.shutdown	= pyxis_disable_irq,
-	.enable		= pyxis_enable_irq,
-	.disable	= pyxis_disable_irq,
-	.ack		= pyxis_mask_and_ack_irq,
-	.end		= pyxis_end_irq,
+	.mask_ack	= pyxis_mask_and_ack_irq,
+	.mask		= pyxis_disable_irq,
+	.unmask		= pyxis_enable_irq,
 };
 
 void 
@@ -119,8 +102,8 @@
 	for (i = 16; i < 48; ++i) {
 		if ((ignore_mask >> i) & 1)
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &pyxis_irq_type;
+		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
+		irq_to_desc(i)->status |= IRQ_LEVEL;
 	}
 
 	setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index d63e93e..0e57e82 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -33,29 +33,12 @@
 	spin_unlock(&srm_irq_lock);
 }
 
-static unsigned int
-srm_startup_irq(unsigned int irq)
-{
-	srm_enable_irq(irq);
-	return 0;
-}
-
-static void
-srm_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		srm_enable_irq(irq);
-}
-
 /* Handle interrupts from the SRM, assuming no additional weirdness.  */
 static struct irq_chip srm_irq_type = {
 	.name		= "SRM",
-	.startup	= srm_startup_irq,
-	.shutdown	= srm_disable_irq,
-	.enable		= srm_enable_irq,
-	.disable	= srm_disable_irq,
-	.ack		= srm_disable_irq,
-	.end		= srm_end_irq,
+	.unmask		= srm_enable_irq,
+	.mask		= srm_disable_irq,
+	.mask_ack	= srm_disable_irq,
 };
 
 void __init
@@ -68,8 +51,8 @@
 	for (i = 16; i < max; ++i) {
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &srm_irq_type;
+		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
+		irq_to_desc(i)->status |= IRQ_LEVEL;
 	}
 }
 
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 547e8b8..fe698b5 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -951,9 +951,6 @@
 	return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);
 }
 
-#define MAX_SELECT_SECONDS \
-	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
-
 SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
 		fd_set __user *, exp, struct timeval32 __user *, tvp)
 {
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 20a30b8..7bef617 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -65,13 +65,6 @@
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
-static unsigned int
-alcor_startup_irq(unsigned int irq)
-{
-	alcor_enable_irq(irq);
-	return 0;
-}
-
 static void
 alcor_isa_mask_and_ack_irq(unsigned int irq)
 {
@@ -82,21 +75,11 @@
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
-static void
-alcor_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		alcor_enable_irq(irq);
-}
-
 static struct irq_chip alcor_irq_type = {
 	.name		= "ALCOR",
-	.startup	= alcor_startup_irq,
-	.shutdown	= alcor_disable_irq,
-	.enable		= alcor_enable_irq,
-	.disable	= alcor_disable_irq,
-	.ack		= alcor_mask_and_ack_irq,
-	.end		= alcor_end_irq,
+	.unmask		= alcor_enable_irq,
+	.mask		= alcor_disable_irq,
+	.mask_ack	= alcor_mask_and_ack_irq,
 };
 
 static void
@@ -142,8 +125,8 @@
 		   on while IRQ probing.  */
 		if (i >= 16+20 && i <= 16+30)
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &alcor_irq_type;
+		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
+		irq_to_desc(i)->status |= IRQ_LEVEL;
 	}
 	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
 
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 14c8898..b0c9164 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -57,28 +57,11 @@
 	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
 }
 
-static unsigned int
-cabriolet_startup_irq(unsigned int irq)
-{ 
-	cabriolet_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-cabriolet_end_irq(unsigned int irq)
-{ 
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		cabriolet_enable_irq(irq);
-}
-
 static struct irq_chip cabriolet_irq_type = {
 	.name		= "CABRIOLET",
-	.startup	= cabriolet_startup_irq,
-	.shutdown	= cabriolet_disable_irq,
-	.enable		= cabriolet_enable_irq,
-	.disable	= cabriolet_disable_irq,
-	.ack		= cabriolet_disable_irq,
-	.end		= cabriolet_end_irq,
+	.unmask		= cabriolet_enable_irq,
+	.mask		= cabriolet_disable_irq,
+	.mask_ack	= cabriolet_disable_irq,
 };
 
 static void 
@@ -122,8 +105,9 @@
 		outb(0xff, 0x806);
 
 		for (i = 16; i < 35; ++i) {
-			irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-			irq_desc[i].chip = &cabriolet_irq_type;
+			set_irq_chip_and_handler(i, &cabriolet_irq_type,
+				handle_level_irq);
+			irq_to_desc(i)->status |= IRQ_LEVEL;
 		}
 	}
 
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 4026502..edad5f7 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -115,20 +115,6 @@
 	spin_unlock(&dp264_irq_lock);
 }
 
-static unsigned int
-dp264_startup_irq(unsigned int irq)
-{ 
-	dp264_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-dp264_end_irq(unsigned int irq)
-{ 
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		dp264_enable_irq(irq);
-}
-
 static void
 clipper_enable_irq(unsigned int irq)
 {
@@ -147,20 +133,6 @@
 	spin_unlock(&dp264_irq_lock);
 }
 
-static unsigned int
-clipper_startup_irq(unsigned int irq)
-{ 
-	clipper_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-clipper_end_irq(unsigned int irq)
-{ 
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		clipper_enable_irq(irq);
-}
-
 static void
 cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 {
@@ -200,23 +172,17 @@
 
 static struct irq_chip dp264_irq_type = {
 	.name		= "DP264",
-	.startup	= dp264_startup_irq,
-	.shutdown	= dp264_disable_irq,
-	.enable		= dp264_enable_irq,
-	.disable	= dp264_disable_irq,
-	.ack		= dp264_disable_irq,
-	.end		= dp264_end_irq,
+	.unmask		= dp264_enable_irq,
+	.mask		= dp264_disable_irq,
+	.mask_ack	= dp264_disable_irq,
 	.set_affinity	= dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
 	.name		= "CLIPPER",
-	.startup	= clipper_startup_irq,
-	.shutdown	= clipper_disable_irq,
-	.enable		= clipper_enable_irq,
-	.disable	= clipper_disable_irq,
-	.ack		= clipper_disable_irq,
-	.end		= clipper_end_irq,
+	.unmask		= clipper_enable_irq,
+	.mask		= clipper_disable_irq,
+	.mask_ack	= clipper_disable_irq,
 	.set_affinity	= clipper_set_affinity,
 };
 
@@ -302,8 +268,8 @@
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = ops;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, ops, handle_level_irq);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index df2090c..ae5f29d 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -55,28 +55,11 @@
 	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
 }
 
-static unsigned int
-eb64p_startup_irq(unsigned int irq)
-{
-	eb64p_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-eb64p_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		eb64p_enable_irq(irq);
-}
-
 static struct irq_chip eb64p_irq_type = {
 	.name		= "EB64P",
-	.startup	= eb64p_startup_irq,
-	.shutdown	= eb64p_disable_irq,
-	.enable		= eb64p_enable_irq,
-	.disable	= eb64p_disable_irq,
-	.ack		= eb64p_disable_irq,
-	.end		= eb64p_end_irq,
+	.unmask		= eb64p_enable_irq,
+	.mask		= eb64p_disable_irq,
+	.mask_ack	= eb64p_disable_irq,
 };
 
 static void 
@@ -135,8 +118,8 @@
 	init_i8259a_irqs();
 
 	for (i = 16; i < 32; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &eb64p_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
 	}		
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 3ca1dbc..1121bc5 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -66,28 +66,11 @@
 	eiger_update_irq_hw(irq, mask);
 }
 
-static unsigned int
-eiger_startup_irq(unsigned int irq)
-{
-	eiger_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-eiger_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		eiger_enable_irq(irq);
-}
-
 static struct irq_chip eiger_irq_type = {
 	.name		= "EIGER",
-	.startup	= eiger_startup_irq,
-	.shutdown	= eiger_disable_irq,
-	.enable		= eiger_enable_irq,
-	.disable	= eiger_disable_irq,
-	.ack		= eiger_disable_irq,
-	.end		= eiger_end_irq,
+	.unmask		= eiger_enable_irq,
+	.mask		= eiger_disable_irq,
+	.mask_ack	= eiger_disable_irq,
 };
 
 static void
@@ -153,8 +136,8 @@
 	init_i8259a_irqs();
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &eiger_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 7a7ae36..34f55e0 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -62,30 +62,6 @@
  * world.
  */
 
-static unsigned int
-jensen_local_startup(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_startup_irq(1);
-	else
-		/*
-		 * For all true local interrupts, set the flag that prevents
-		 * the IPL from being dropped during handler processing.
-		 */
-		if (irq_desc[irq].action)
-			irq_desc[irq].action->flags |= IRQF_DISABLED;
-	return 0;
-}
-
-static void
-jensen_local_shutdown(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_disable_irq(1);
-}
-
 static void
 jensen_local_enable(unsigned int irq)
 {
@@ -103,29 +79,18 @@
 }
 
 static void
-jensen_local_ack(unsigned int irq)
+jensen_local_mask_ack(unsigned int irq)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
 	if (irq == 7)
 		i8259a_mask_and_ack_irq(1);
 }
 
-static void
-jensen_local_end(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_end_irq(1);
-}
-
 static struct irq_chip jensen_local_irq_type = {
 	.name		= "LOCAL",
-	.startup	= jensen_local_startup,
-	.shutdown	= jensen_local_shutdown,
-	.enable		= jensen_local_enable,
-	.disable	= jensen_local_disable,
-	.ack		= jensen_local_ack,
-	.end		= jensen_local_end,
+	.unmask		= jensen_local_enable,
+	.mask		= jensen_local_disable,
+	.mask_ack	= jensen_local_mask_ack,
 };
 
 static void 
@@ -158,7 +123,7 @@
 	}
 
 	/* If there is no handler yet... */
-	if (irq_desc[irq].action == NULL) {
+	if (!irq_has_action(irq)) {
 	    /* If it is a local interrupt that cannot be masked... */
 	    if (vector >= 0x900)
 	    {
@@ -206,11 +171,11 @@
 {
 	init_i8259a_irqs();
 
-	irq_desc[1].chip = &jensen_local_irq_type;
-	irq_desc[4].chip = &jensen_local_irq_type;
-	irq_desc[3].chip = &jensen_local_irq_type;
-	irq_desc[7].chip = &jensen_local_irq_type;
-	irq_desc[9].chip = &jensen_local_irq_type;
+	set_irq_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
 
 	common_init_isa_dma();
 }
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 0bb3b5c..2bfc9f1 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -143,20 +143,6 @@
 	spin_unlock(&io7->irq_lock);
 }
 
-static unsigned int
-io7_startup_irq(unsigned int irq)
-{
-	io7_enable_irq(irq);
-	return 0;	/* never anything pending */
-}
-
-static void
-io7_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		io7_enable_irq(irq);
-}
-
 static void
 marvel_irq_noop(unsigned int irq) 
 { 
@@ -171,32 +157,22 @@
 
 static struct irq_chip marvel_legacy_irq_type = {
 	.name		= "LEGACY",
-	.startup	= marvel_irq_noop_return,
-	.shutdown	= marvel_irq_noop,
-	.enable		= marvel_irq_noop,
-	.disable	= marvel_irq_noop,
-	.ack		= marvel_irq_noop,
-	.end		= marvel_irq_noop,
+	.mask		= marvel_irq_noop,
+	.unmask		= marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
 	.name		= "LSI",
-	.startup	= io7_startup_irq,
-	.shutdown	= io7_disable_irq,
-	.enable		= io7_enable_irq,
-	.disable	= io7_disable_irq,
-	.ack		= io7_disable_irq,
-	.end		= io7_end_irq,
+	.unmask		= io7_enable_irq,
+	.mask		= io7_disable_irq,
+	.mask_ack	= io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
 	.name		= "MSI",
-	.startup	= io7_startup_irq,
-	.shutdown	= io7_disable_irq,
-	.enable		= io7_enable_irq,
-	.disable	= io7_disable_irq,
+	.unmask		= io7_enable_irq,
+	.mask		= io7_disable_irq,
 	.ack		= marvel_irq_noop,
-	.end		= io7_end_irq,
 };
 
 static void
@@ -304,8 +280,8 @@
 
 	/* Set up the lsi irqs.  */
 	for (i = 0; i < 128; ++i) {
-		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[base + i].chip = lsi_ops;
+		irq_to_desc(base + i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
 	}
 
 	/* Disable the implemented irqs in hardware.  */
@@ -318,8 +294,8 @@
 
 	/* Set up the msi irqs.  */
 	for (i = 128; i < (128 + 512); ++i) {
-		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[base + i].chip = msi_ops;
+		irq_to_desc(base + i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
 	}
 
 	for (i = 0; i < 16; ++i)
@@ -336,8 +312,8 @@
 
 	/* Reserve the legacy irqs.  */
 	for (i = 0; i < 16; ++i) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].chip = &marvel_legacy_irq_type;
+		set_irq_chip_and_handler(i, &marvel_legacy_irq_type,
+			handle_level_irq);
 	}
 
 	/* Init the io7 irqs.  */
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index ee88651..bcc1639 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -54,28 +54,11 @@
 	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
 }
 
-static unsigned int
-mikasa_startup_irq(unsigned int irq)
-{
-	mikasa_enable_irq(irq);
-	return 0;
-}
-
-static void
-mikasa_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		mikasa_enable_irq(irq);
-}
-
 static struct irq_chip mikasa_irq_type = {
 	.name		= "MIKASA",
-	.startup	= mikasa_startup_irq,
-	.shutdown	= mikasa_disable_irq,
-	.enable		= mikasa_enable_irq,
-	.disable	= mikasa_disable_irq,
-	.ack		= mikasa_disable_irq,
-	.end		= mikasa_end_irq,
+	.unmask		= mikasa_enable_irq,
+	.mask		= mikasa_disable_irq,
+	.mask_ack	= mikasa_disable_irq,
 };
 
 static void 
@@ -115,8 +98,8 @@
 	mikasa_update_irq_hw(0);
 
 	for (i = 16; i < 32; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &mikasa_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 86503fe..e88f4ae 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -59,28 +59,11 @@
 	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
 }
 
-static unsigned int
-noritake_startup_irq(unsigned int irq)
-{
-	noritake_enable_irq(irq);
-	return 0;
-}
-
-static void
-noritake_end_irq(unsigned int irq)
-{
-        if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-                noritake_enable_irq(irq);
-}
-
 static struct irq_chip noritake_irq_type = {
 	.name		= "NORITAKE",
-	.startup	= noritake_startup_irq,
-	.shutdown	= noritake_disable_irq,
-	.enable		= noritake_enable_irq,
-	.disable	= noritake_disable_irq,
-	.ack		= noritake_disable_irq,
-	.end		= noritake_end_irq,
+	.unmask		= noritake_enable_irq,
+	.mask		= noritake_disable_irq,
+	.mask_ack	= noritake_disable_irq,
 };
 
 static void 
@@ -144,8 +127,8 @@
 	outw(0, 0x54c);
 
 	for (i = 16; i < 48; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &noritake_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 26c322b..6a51364 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -121,28 +121,11 @@
 	spin_unlock(&rawhide_irq_lock);
 }
 
-static unsigned int
-rawhide_startup_irq(unsigned int irq)
-{
-	rawhide_enable_irq(irq);
-	return 0;
-}
-
-static void
-rawhide_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		rawhide_enable_irq(irq);
-}
-
 static struct irq_chip rawhide_irq_type = {
 	.name		= "RAWHIDE",
-	.startup	= rawhide_startup_irq,
-	.shutdown	= rawhide_disable_irq,
-	.enable		= rawhide_enable_irq,
-	.disable	= rawhide_disable_irq,
-	.ack		= rawhide_mask_and_ack_irq,
-	.end		= rawhide_end_irq,
+	.unmask		= rawhide_enable_irq,
+	.mask		= rawhide_disable_irq,
+	.mask_ack	= rawhide_mask_and_ack_irq,
 };
 
 static void 
@@ -194,8 +177,8 @@
 	}
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &rawhide_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index be16112..89e7e37e 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -58,28 +58,11 @@
 	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
 }
 
-static unsigned int
-rx164_startup_irq(unsigned int irq)
-{
-	rx164_enable_irq(irq);
-	return 0;
-}
-
-static void
-rx164_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		rx164_enable_irq(irq);
-}
-
 static struct irq_chip rx164_irq_type = {
 	.name		= "RX164",
-	.startup	= rx164_startup_irq,
-	.shutdown	= rx164_disable_irq,
-	.enable		= rx164_enable_irq,
-	.disable	= rx164_disable_irq,
-	.ack		= rx164_disable_irq,
-	.end		= rx164_end_irq,
+	.unmask		= rx164_enable_irq,
+	.mask		= rx164_disable_irq,
+	.mask_ack	= rx164_disable_irq,
 };
 
 static void 
@@ -116,8 +99,8 @@
 
 	rx164_update_irq_hw(0);
 	for (i = 16; i < 40; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &rx164_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index b2abe27..5c4423d 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -474,20 +474,6 @@
 #endif
 }
 
-static unsigned int
-sable_lynx_startup_irq(unsigned int irq)
-{
-	sable_lynx_enable_irq(irq);
-	return 0;
-}
-
-static void
-sable_lynx_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		sable_lynx_enable_irq(irq);
-}
-
 static void
 sable_lynx_mask_and_ack_irq(unsigned int irq)
 {
@@ -503,12 +489,9 @@
 
 static struct irq_chip sable_lynx_irq_type = {
 	.name		= "SABLE/LYNX",
-	.startup	= sable_lynx_startup_irq,
-	.shutdown	= sable_lynx_disable_irq,
-	.enable		= sable_lynx_enable_irq,
-	.disable	= sable_lynx_disable_irq,
-	.ack		= sable_lynx_mask_and_ack_irq,
-	.end		= sable_lynx_end_irq,
+	.unmask		= sable_lynx_enable_irq,
+	.mask		= sable_lynx_disable_irq,
+	.mask_ack	= sable_lynx_mask_and_ack_irq,
 };
 
 static void 
@@ -535,8 +518,9 @@
 	long i;
 
 	for (i = 0; i < nr_of_irqs; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &sable_lynx_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
+			handle_level_irq);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 4da596b..f8a1e8a 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -60,28 +60,11 @@
 	takara_update_irq_hw(irq, mask);
 }
 
-static unsigned int
-takara_startup_irq(unsigned int irq)
-{
-	takara_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-takara_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		takara_enable_irq(irq);
-}
-
 static struct irq_chip takara_irq_type = {
 	.name		= "TAKARA",
-	.startup	= takara_startup_irq,
-	.shutdown	= takara_disable_irq,
-	.enable		= takara_enable_irq,
-	.disable	= takara_disable_irq,
-	.ack		= takara_disable_irq,
-	.end		= takara_end_irq,
+	.unmask		= takara_enable_irq,
+	.mask		= takara_disable_irq,
+	.mask_ack	= takara_disable_irq,
 };
 
 static void
@@ -153,8 +136,8 @@
 		takara_update_irq_hw(i, -1);
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &takara_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 9008d0f..e02494b 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -129,20 +129,6 @@
 	spin_unlock(&titan_irq_lock);
 }
 
-static unsigned int
-titan_startup_irq(unsigned int irq)
-{
-	titan_enable_irq(irq);
-	return 0;	/* never anything pending */
-}
-
-static void
-titan_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		titan_enable_irq(irq);
-}
-
 static void
 titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 {
@@ -189,20 +175,17 @@
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = ops;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, ops, handle_level_irq);
 	}
 }
 
 static struct irq_chip titan_irq_type = {
-       .name	       = "TITAN",
-       .startup        = titan_startup_irq,
-       .shutdown       = titan_disable_irq,
-       .enable         = titan_enable_irq,
-       .disable        = titan_disable_irq,
-       .ack            = titan_disable_irq,
-       .end            = titan_end_irq,
-       .set_affinity   = titan_set_irq_affinity,
+       .name		= "TITAN",
+       .unmask		= titan_enable_irq,
+       .mask		= titan_disable_irq,
+       .mask_ack	= titan_disable_irq,
+       .set_affinity	= titan_set_irq_affinity,
 };
 
 static irqreturn_t
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 62fd972..eec5259 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -139,32 +139,11 @@
 	spin_unlock(&wildfire_irq_lock);
 }
 
-static unsigned int
-wildfire_startup_irq(unsigned int irq)
-{ 
-	wildfire_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-wildfire_end_irq(unsigned int irq)
-{ 
-#if 0
-	if (!irq_desc[irq].action)
-		printk("got irq %d\n", irq);
-#endif
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		wildfire_enable_irq(irq);
-}
-
 static struct irq_chip wildfire_irq_type = {
 	.name		= "WILDFIRE",
-	.startup	= wildfire_startup_irq,
-	.shutdown	= wildfire_disable_irq,
-	.enable		= wildfire_enable_irq,
-	.disable	= wildfire_disable_irq,
-	.ack		= wildfire_mask_and_ack_irq,
-	.end		= wildfire_end_irq,
+	.unmask		= wildfire_enable_irq,
+	.mask		= wildfire_disable_irq,
+	.mask_ack	= wildfire_mask_and_ack_irq,
 };
 
 static void __init
@@ -198,15 +177,18 @@
 	for (i = 0; i < 16; ++i) {
 		if (i == 2)
 			continue;
-		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i+irq_bias].chip = &wildfire_irq_type;
+		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
+			handle_level_irq);
 	}
 
-	irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-	irq_desc[36+irq_bias].chip = &wildfire_irq_type;
+	irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
+	set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
+		handle_level_irq);
 	for (i = 40; i < 64; ++i) {
-		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i+irq_bias].chip = &wildfire_irq_type;
+		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
+			handle_level_irq);
 	}
 
 	setup_irq(32+irq_bias, &isa_enable);	
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 9b72c59..c0a83ab 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -2,8 +2,8 @@
 # Makefile for alpha-specific library files..
 #
 
-EXTRA_AFLAGS := $(KBUILD_CFLAGS)
-EXTRA_CFLAGS := -Werror
+asflags-y := $(KBUILD_CFLAGS)
+ccflags-y := -Werror
 
 # Many of these routines have implementations tuned for ev6.
 # Choose them iff we're targeting ev6 specifically.
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile
index 359ef08..7f46719 100644
--- a/arch/alpha/math-emu/Makefile
+++ b/arch/alpha/math-emu/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the FPU instruction emulation.
 #
 
-EXTRA_CFLAGS := -w
+ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index 09399c5..c993d3f 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux alpha-specific parts of the memory manager.
 #
 
-EXTRA_CFLAGS := -Werror
+ccflags-y := -Werror
 
 obj-y	:= init.o fault.o extable.o
 
diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile
index 4aa5624..3473de7 100644
--- a/arch/alpha/oprofile/Makefile
+++ b/arch/alpha/oprofile/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS := -Werror -Wno-sign-compare
+ccflags-y := -Werror -Wno-sign-compare
 
 obj-$(CONFIG_OPROFILE) += oprofile.o
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e2f8011..5cff165 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -26,6 +26,8 @@
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_SPARSE_IRQ
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
@@ -97,10 +99,6 @@
 	  <file:Documentation/mca.txt> (and especially the web page given
 	  there) before attempting to build an MCA bus kernel.
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config STACKTRACE_SUPPORT
 	bool
 	default y
@@ -180,9 +178,6 @@
 config ARCH_MTD_XIP
 	bool
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config ARM_L1_CACHE_SHIFT_6
 	bool
 	help
@@ -368,7 +363,7 @@
 	bool "Freescale MXS-based"
 	select GENERIC_CLOCKEVENTS
 	select ARCH_REQUIRE_GPIOLIB
-	select COMMON_CLKDEV
+	select CLKDEV_LOOKUP
 	help
 	  Support for Freescale MXS-based family of processors
 
@@ -771,6 +766,7 @@
 	select ARCH_SPARSEMEM_ENABLE
 	select GENERIC_GPIO
 	select HAVE_CLK
+	select ARCH_HAS_CPUFREQ
 	select GENERIC_CLOCKEVENTS
 	select HAVE_S3C_RTC if RTC_CLASS
 	select HAVE_S3C2410_I2C if I2C
@@ -1281,7 +1277,7 @@
 config SMP_ON_UP
 	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
-	depends on SMP && !XIP
+	depends on SMP && !XIP_KERNEL
 	default y
 	help
 	  SMP kernels contain instructions which fail on non-SMP processors.
@@ -1452,15 +1448,6 @@
 	  Enable hardware performance counter support for perf events. If
 	  disabled, perf events will use software events only.
 
-config SPARSE_IRQ
-	def_bool n
-	help
-	  This enables support for sparse irqs. This is useful in general
-	  as most CPUs have a fairly sparse array of IRQ vectors, which
-	  the irq_desc then maps directly on to. Systems with a high
-	  number of off-chip IRQs will want to treat this as
-	  experimental until they have been independently verified.
-
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0b89ef0..2243772 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -50,57 +50,56 @@
 
 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
 
-static inline void __iomem *gic_dist_base(unsigned int irq)
+static inline void __iomem *gic_dist_base(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
 	return gic_data->dist_base;
 }
 
-static inline void __iomem *gic_cpu_base(unsigned int irq)
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
 	return gic_data->cpu_base;
 }
 
-static inline unsigned int gic_irq(unsigned int irq)
+static inline unsigned int gic_irq(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
-	return irq - gic_data->irq_offset;
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+	return d->irq - gic_data->irq_offset;
 }
 
 /*
  * Routines to acknowledge, disable and enable interrupts
  */
-static void gic_ack_irq(unsigned int irq)
+static void gic_ack_irq(struct irq_data *d)
 {
-
 	spin_lock(&irq_controller_lock);
-	writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
+	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 	spin_unlock(&irq_controller_lock);
 }
 
-static void gic_mask_irq(unsigned int irq)
+static void gic_mask_irq(struct irq_data *d)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
+	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
 
-static void gic_unmask_irq(unsigned int irq)
+static void gic_unmask_irq(struct irq_data *d)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
+	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
 
-static int gic_set_type(unsigned int irq, unsigned int type)
+static int gic_set_type(struct irq_data *d, unsigned int type)
 {
-	void __iomem *base = gic_dist_base(irq);
-	unsigned int gicirq = gic_irq(irq);
+	void __iomem *base = gic_dist_base(d);
+	unsigned int gicirq = gic_irq(d);
 	u32 enablemask = 1 << (gicirq % 32);
 	u32 enableoff = (gicirq / 32) * 4;
 	u32 confmask = 0x2 << ((gicirq % 16) * 2);
@@ -143,21 +142,22 @@
 }
 
 #ifdef CONFIG_SMP
-static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
+static int
+gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
 {
-	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
-	unsigned int shift = (irq % 4) * 8;
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+	unsigned int shift = (d->irq % 4) * 8;
 	unsigned int cpu = cpumask_first(mask_val);
 	u32 val;
 	struct irq_desc *desc;
 
 	spin_lock(&irq_controller_lock);
-	desc = irq_to_desc(irq);
+	desc = irq_to_desc(d->irq);
 	if (desc == NULL) {
 		spin_unlock(&irq_controller_lock);
 		return -EINVAL;
 	}
-	desc->node = cpu;
+	d->node = cpu;
 	val = readl(reg) & ~(0xff << shift);
 	val |= 1 << (cpu + shift);
 	writel(val, reg);
@@ -175,7 +175,7 @@
 	unsigned long status;
 
 	/* primary controller ack'ing */
-	chip->ack(irq);
+	chip->irq_ack(&desc->irq_data);
 
 	spin_lock(&irq_controller_lock);
 	status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
@@ -193,17 +193,17 @@
 
  out:
 	/* primary controller unmasking */
-	chip->unmask(irq);
+	chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip gic_chip = {
-	.name		= "GIC",
-	.ack		= gic_ack_irq,
-	.mask		= gic_mask_irq,
-	.unmask		= gic_unmask_irq,
-	.set_type	= gic_set_type,
+	.name			= "GIC",
+	.irq_ack		= gic_ack_irq,
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_set_type		= gic_set_type,
 #ifdef CONFIG_SMP
-	.set_affinity	= gic_set_cpu,
+	.irq_set_affinity	= gic_set_cpu,
 #endif
 };
 
@@ -337,7 +337,7 @@
 
 	local_irq_save(flags);
 	irq_to_desc(irq)->status |= IRQ_NOPROBE;
-	gic_unmask_irq(irq);
+	gic_unmask_irq(irq_get_irq_data(irq));
 	local_irq_restore(flags);
 }
 
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 665ebf7..fcddd48 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -31,8 +31,10 @@
 
 #define MAX_SLOTS		21
 
-static void it8152_mask_irq(unsigned int irq)
+static void it8152_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
        if (irq >= IT8152_LD_IRQ(0)) {
 	       __raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) |
 			    (1 << (irq - IT8152_LD_IRQ(0)))),
@@ -48,8 +50,10 @@
        }
 }
 
-static void it8152_unmask_irq(unsigned int irq)
+static void it8152_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
        if (irq >= IT8152_LD_IRQ(0)) {
 	       __raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) &
 			     ~(1 << (irq - IT8152_LD_IRQ(0)))),
@@ -67,9 +71,9 @@
 
 static struct irq_chip it8152_irq_chip = {
 	.name		= "it8152",
-	.ack		= it8152_mask_irq,
-	.mask		= it8152_mask_irq,
-	.unmask		= it8152_unmask_irq,
+	.irq_ack	= it8152_mask_irq,
+	.irq_mask	= it8152_mask_irq,
+	.irq_unmask	= it8152_unmask_irq,
 };
 
 void it8152_init_irq(void)
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 9dff07c..a026a6b 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -144,7 +144,7 @@
 	int req, i;
 
 	/* Acknowledge the parent IRQ */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	/* check why this interrupt was generated */
 	req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00;
@@ -161,33 +161,33 @@
 	}
 }
 
-static void locomo_ack_irq(unsigned int irq)
+static void locomo_ack_irq(struct irq_data *d)
 {
 }
 
-static void locomo_mask_irq(unsigned int irq)
+static void locomo_mask_irq(struct irq_data *d)
 {
-	struct locomo *lchip = get_irq_chip_data(irq);
+	struct locomo *lchip = irq_data_get_irq_chip_data(d);
 	unsigned int r;
 	r = locomo_readl(lchip->base + LOCOMO_ICR);
-	r &= ~(0x0010 << (irq - lchip->irq_base));
+	r &= ~(0x0010 << (d->irq - lchip->irq_base));
 	locomo_writel(r, lchip->base + LOCOMO_ICR);
 }
 
-static void locomo_unmask_irq(unsigned int irq)
+static void locomo_unmask_irq(struct irq_data *d)
 {
-	struct locomo *lchip = get_irq_chip_data(irq);
+	struct locomo *lchip = irq_data_get_irq_chip_data(d);
 	unsigned int r;
 	r = locomo_readl(lchip->base + LOCOMO_ICR);
-	r |= (0x0010 << (irq - lchip->irq_base));
+	r |= (0x0010 << (d->irq - lchip->irq_base));
 	locomo_writel(r, lchip->base + LOCOMO_ICR);
 }
 
 static struct irq_chip locomo_chip = {
-	.name	= "LOCOMO",
-	.ack	= locomo_ack_irq,
-	.mask	= locomo_mask_irq,
-	.unmask	= locomo_unmask_irq,
+	.name		= "LOCOMO",
+	.irq_ack	= locomo_ack_irq,
+	.irq_mask	= locomo_mask_irq,
+	.irq_unmask	= locomo_unmask_irq,
 };
 
 static void locomo_setup_irq(struct locomo *lchip)
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index c0258a8..eb9796b 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -210,7 +210,7 @@
 
 	sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
 
@@ -228,35 +228,35 @@
 			generic_handle_irq(i + sachip->irq_base);
 
 	/* For level-based interrupts */
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 #define SA1111_IRQMASK_LO(x)	(1 << (x - sachip->irq_base))
 #define SA1111_IRQMASK_HI(x)	(1 << (x - sachip->irq_base - 32))
 
-static void sa1111_ack_irq(unsigned int irq)
+static void sa1111_ack_irq(struct irq_data *d)
 {
 }
 
-static void sa1111_mask_lowirq(unsigned int irq)
+static void sa1111_mask_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie0;
 
 	ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
-	ie0 &= ~SA1111_IRQMASK_LO(irq);
+	ie0 &= ~SA1111_IRQMASK_LO(d->irq);
 	writel(ie0, mapbase + SA1111_INTEN0);
 }
 
-static void sa1111_unmask_lowirq(unsigned int irq)
+static void sa1111_unmask_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie0;
 
 	ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
-	ie0 |= SA1111_IRQMASK_LO(irq);
+	ie0 |= SA1111_IRQMASK_LO(d->irq);
 	sa1111_writel(ie0, mapbase + SA1111_INTEN0);
 }
 
@@ -267,11 +267,11 @@
  * be triggered.  In fact, its very difficult, if not impossible to get
  * INTSET to re-trigger the interrupt.
  */
-static int sa1111_retrigger_lowirq(unsigned int irq)
+static int sa1111_retrigger_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long ip0;
 	int i;
 
@@ -279,21 +279,21 @@
 	for (i = 0; i < 8; i++) {
 		sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
 		sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
-		if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask)
+		if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
 			break;
 	}
 
 	if (i == 8)
 		printk(KERN_ERR "Danger Will Robinson: failed to "
-			"re-trigger IRQ%d\n", irq);
+			"re-trigger IRQ%d\n", d->irq);
 	return i == 8 ? -1 : 0;
 }
 
-static int sa1111_type_lowirq(unsigned int irq, unsigned int flags)
+static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long ip0;
 
 	if (flags == IRQ_TYPE_PROBE)
@@ -313,11 +313,11 @@
 	return 0;
 }
 
-static int sa1111_wake_lowirq(unsigned int irq, unsigned int on)
+static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long we0;
 
 	we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
@@ -332,33 +332,33 @@
 
 static struct irq_chip sa1111_low_chip = {
 	.name		= "SA1111-l",
-	.ack		= sa1111_ack_irq,
-	.mask		= sa1111_mask_lowirq,
-	.unmask		= sa1111_unmask_lowirq,
-	.retrigger	= sa1111_retrigger_lowirq,
-	.set_type	= sa1111_type_lowirq,
-	.set_wake	= sa1111_wake_lowirq,
+	.irq_ack	= sa1111_ack_irq,
+	.irq_mask	= sa1111_mask_lowirq,
+	.irq_unmask	= sa1111_unmask_lowirq,
+	.irq_retrigger	= sa1111_retrigger_lowirq,
+	.irq_set_type	= sa1111_type_lowirq,
+	.irq_set_wake	= sa1111_wake_lowirq,
 };
 
-static void sa1111_mask_highirq(unsigned int irq)
+static void sa1111_mask_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie1;
 
 	ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
-	ie1 &= ~SA1111_IRQMASK_HI(irq);
+	ie1 &= ~SA1111_IRQMASK_HI(d->irq);
 	sa1111_writel(ie1, mapbase + SA1111_INTEN1);
 }
 
-static void sa1111_unmask_highirq(unsigned int irq)
+static void sa1111_unmask_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie1;
 
 	ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
-	ie1 |= SA1111_IRQMASK_HI(irq);
+	ie1 |= SA1111_IRQMASK_HI(d->irq);
 	sa1111_writel(ie1, mapbase + SA1111_INTEN1);
 }
 
@@ -369,11 +369,11 @@
  * be triggered.  In fact, its very difficult, if not impossible to get
  * INTSET to re-trigger the interrupt.
  */
-static int sa1111_retrigger_highirq(unsigned int irq)
+static int sa1111_retrigger_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long ip1;
 	int i;
 
@@ -387,15 +387,15 @@
 
 	if (i == 8)
 		printk(KERN_ERR "Danger Will Robinson: failed to "
-			"re-trigger IRQ%d\n", irq);
+			"re-trigger IRQ%d\n", d->irq);
 	return i == 8 ? -1 : 0;
 }
 
-static int sa1111_type_highirq(unsigned int irq, unsigned int flags)
+static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long ip1;
 
 	if (flags == IRQ_TYPE_PROBE)
@@ -415,11 +415,11 @@
 	return 0;
 }
 
-static int sa1111_wake_highirq(unsigned int irq, unsigned int on)
+static int sa1111_wake_highirq(struct irq_data *d, unsigned int on)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long we1;
 
 	we1 = sa1111_readl(mapbase + SA1111_WAKEEN1);
@@ -434,12 +434,12 @@
 
 static struct irq_chip sa1111_high_chip = {
 	.name		= "SA1111-h",
-	.ack		= sa1111_ack_irq,
-	.mask		= sa1111_mask_highirq,
-	.unmask		= sa1111_unmask_highirq,
-	.retrigger	= sa1111_retrigger_highirq,
-	.set_type	= sa1111_type_highirq,
-	.set_wake	= sa1111_wake_highirq,
+	.irq_ack	= sa1111_ack_irq,
+	.irq_mask	= sa1111_mask_highirq,
+	.irq_unmask	= sa1111_unmask_highirq,
+	.irq_retrigger	= sa1111_retrigger_highirq,
+	.irq_set_type	= sa1111_type_highirq,
+	.irq_set_wake	= sa1111_wake_highirq,
 };
 
 static void sa1111_setup_irq(struct sa1111 *sachip)
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index cb660bc..ae5fe72 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -204,26 +204,26 @@
 static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
 #endif /* CONFIG_PM */
 
-static void vic_ack_irq(unsigned int irq)
+static void vic_ack_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->irq & 31;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 	/* moreover, clear the soft-triggered, in case it was the reason */
 	writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
 }
 
-static void vic_mask_irq(unsigned int irq)
+static void vic_mask_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->irq & 31;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 }
 
-static void vic_unmask_irq(unsigned int irq)
+static void vic_unmask_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->irq & 31;
 	writel(1 << irq, base + VIC_INT_ENABLE);
 }
 
@@ -242,10 +242,10 @@
 	return NULL;
 }
 
-static int vic_set_wake(unsigned int irq, unsigned int on)
+static int vic_set_wake(struct irq_data *d, unsigned int on)
 {
-	struct vic_device *v = vic_from_irq(irq);
-	unsigned int off = irq & 31;
+	struct vic_device *v = vic_from_irq(d->irq);
+	unsigned int off = d->irq & 31;
 	u32 bit = 1 << off;
 
 	if (!v)
@@ -267,10 +267,10 @@
 
 static struct irq_chip vic_chip = {
 	.name		= "VIC",
-	.ack		= vic_ack_irq,
-	.mask		= vic_mask_irq,
-	.unmask		= vic_unmask_irq,
-	.set_wake	= vic_set_wake,
+	.irq_ack	= vic_ack_irq,
+	.irq_mask	= vic_mask_irq,
+	.irq_unmask	= vic_unmask_irq,
+	.irq_set_wake	= vic_set_wake,
 };
 
 static void __init vic_disable(void __iomem *base)
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 338ff19..7b1bb2b 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -285,7 +285,7 @@
 	if (__builtin_constant_p(x))
 	       return constant_fls(x);
 
-	asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
+	asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
        	ret = 32 - ret;
 	return ret;
 }
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index a84628b..c8e6ddf 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -115,4 +115,6 @@
 	}
 }
 
+extern void sched_clock_postinit(void);
+
 #endif
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index eed2f79..2ad62df 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -443,40 +443,40 @@
  *
  * They are not meant to be called directly, but via enable/disable_irq.
  */
-static void ecard_irq_unmask(unsigned int irqnr)
+static void ecard_irq_unmask(struct irq_data *d)
 {
-	ecard_t *ec = slot_to_ecard(irqnr - 32);
+	ecard_t *ec = slot_to_ecard(d->irq - 32);
 
 	if (ec) {
 		if (!ec->ops)
 			ec->ops = &ecard_default_ops;
 
 		if (ec->claimed && ec->ops->irqenable)
-			ec->ops->irqenable(ec, irqnr);
+			ec->ops->irqenable(ec, d->irq);
 		else
 			printk(KERN_ERR "ecard: rejecting request to "
-				"enable IRQs for %d\n", irqnr);
+				"enable IRQs for %d\n", d->irq);
 	}
 }
 
-static void ecard_irq_mask(unsigned int irqnr)
+static void ecard_irq_mask(struct irq_data *d)
 {
-	ecard_t *ec = slot_to_ecard(irqnr - 32);
+	ecard_t *ec = slot_to_ecard(d->irq - 32);
 
 	if (ec) {
 		if (!ec->ops)
 			ec->ops = &ecard_default_ops;
 
 		if (ec->ops && ec->ops->irqdisable)
-			ec->ops->irqdisable(ec, irqnr);
+			ec->ops->irqdisable(ec, d->irq);
 	}
 }
 
 static struct irq_chip ecard_chip = {
-	.name	= "ECARD",
-	.ack	= ecard_irq_mask,
-	.mask	= ecard_irq_mask,
-	.unmask = ecard_irq_unmask,
+	.name		= "ECARD",
+	.irq_ack	= ecard_irq_mask,
+	.irq_mask	= ecard_irq_mask,
+	.irq_unmask	= ecard_irq_unmask,
 };
 
 void ecard_enablefiq(unsigned int fiqnr)
@@ -551,7 +551,7 @@
 			printk(KERN_ERR "\nInterrupt lockup detected - "
 			       "disabling all expansion card interrupts\n");
 
-			desc->chip->mask(IRQ_EXPANSIONCARD);
+			desc->irq_data.chip->irq_mask(&desc->irq_data);
 			ecard_dump_irq_state();
 		}
 	} else
@@ -574,7 +574,7 @@
 	ecard_t *ec;
 	int called = 0;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 	for (ec = cards; ec; ec = ec->next) {
 		int pending;
 
@@ -591,7 +591,7 @@
 			called ++;
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 
 	if (called == 0)
 		ecard_check_lockup(desc);
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index bbecaac..8f57515 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -60,6 +60,8 @@
 str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
 str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
 	.align
+#else
+	b	__error
 #endif
 
 /*
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 8135438..28536e3 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -88,7 +88,7 @@
 		seq_printf(p, "%*d: ", prec, i);
 		for_each_present_cpu(cpu)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-		seq_printf(p, " %10s", desc->chip->name ? : "-");
+		seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
 		seq_printf(p, "  %s", action->name);
 		for (action = action->next; action; action = action->next)
 			seq_printf(p, ", %s", action->name);
@@ -181,10 +181,11 @@
 
 static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
 
 	raw_spin_lock_irq(&desc->lock);
-	desc->chip->set_affinity(irq, cpumask_of(cpu));
+	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
+					      cpumask_of(cpu), false);
 	raw_spin_unlock_irq(&desc->lock);
 }
 
@@ -199,16 +200,18 @@
 	struct irq_desc *desc;
 
 	for_each_irq_desc(i, desc) {
-		if (desc->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(desc->affinity,
+		struct irq_data *d = &desc->irq_data;
+
+		if (d->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(d->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);
 
-				cpumask_setall(desc->affinity);
-				newcpu = cpumask_any_and(desc->affinity,
+				cpumask_setall(d->affinity);
+				newcpu = cpumask_any_and(d->affinity,
 							 cpu_online_mask);
 			}
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e76fcaa..94bbedb 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -483,6 +483,7 @@
 	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
 
+#ifdef CONFIG_MMU
 /*
  * The vectors page is always readable from user space for the
  * atomic helpers and the signal restart code.  Let's declare a mapping
@@ -503,3 +504,4 @@
 {
 	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
 }
+#endif
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 2cdcc92..9a46370 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -34,7 +34,7 @@
 	sched_clock_update_fn = update;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60);
+	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -60,10 +60,15 @@
 	 * sets the initial epoch.
 	 */
 	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
-	sched_clock_poll(sched_clock_timer.data);
+	update();
 
 	/*
 	 * Ensure that sched_clock() starts off at 0ns
 	 */
 	cd->epoch_ns = 0;
 }
+
+void __init sched_clock_postinit(void)
+{
+	sched_clock_poll(sched_clock_timer.data);
+}
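
With this split, init_sched_clock() no longer arms a kernel timer as a side effect: it calls update() directly to set the initial epoch, and the mod_timer()-based wrap-around poll is deferred to sched_clock_postinit(), presumably so a platform can register its sched_clock source before the timer infrastructure is fully up. The implied ordering, as a sketch only (the real caller is in the arch/arm/kernel/time.c hunk further down):

	void __init time_init(void)
	{
		system_timer->init();	/* platform code may call init_sched_clock() here */
		sched_clock_postinit();	/* now safe to arm the wrap-handling poll timer */
	}
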
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3455ad3..420b8d6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -518,25 +518,21 @@
 #endif
 }
 
-static void __init
-request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
+static void __init request_standard_resources(struct machine_desc *mdesc)
 {
+	struct memblock_region *region;
 	struct resource *res;
-	int i;
 
 	kernel_code.start   = virt_to_phys(_text);
 	kernel_code.end     = virt_to_phys(_etext - 1);
 	kernel_data.start   = virt_to_phys(_sdata);
 	kernel_data.end     = virt_to_phys(_end - 1);
 
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0)
-			continue;
-
+	for_each_memblock(memory, region) {
 		res = alloc_bootmem_low(sizeof(*res));
 		res->name  = "System RAM";
-		res->start = mi->bank[i].start;
-		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
+		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
 		request_resource(&iomem_resource, res);
@@ -650,15 +646,17 @@
 
 __tagtable(ATAG_REVISION, parse_tag_revision);
 
-#ifndef CONFIG_CMDLINE_FORCE
 static int __init parse_tag_cmdline(const struct tag *tag)
 {
+#ifndef CONFIG_CMDLINE_FORCE
 	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
+#else
+	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
+#endif /* CONFIG_CMDLINE_FORCE */
 	return 0;
 }
 
 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
-#endif /* CONFIG_CMDLINE_FORCE */
 
 /*
  * Scan the tag table for this tag, and call its parse function.
@@ -857,7 +855,7 @@
 	arm_memblock_init(&meminfo, mdesc);
 
 	paging_init(mdesc);
-	request_standard_resources(&meminfo, mdesc);
+	request_standard_resources(mdesc);
 
 #ifdef CONFIG_SMP
 	if (is_smp())
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index dd79074..fd91566 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -114,7 +114,7 @@
 		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
 		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
-			(twd_timer_rate / 100000) % 100);
+			(twd_timer_rate / 10000) % 100);
 	}
 
 	load = twd_timer_rate / HZ;
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index c2e112e..381d23a 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -94,10 +94,13 @@
 	if (tsk != current) {
 #ifdef CONFIG_SMP
 		/*
-		 * What guarantees do we have here that 'tsk'
-		 * is not running on another CPU?
+		 * What guarantees do we have here that 'tsk' is not
+		 * running on another CPU?  For now, ignore it as we
+		 * can't guarantee we won't explode.
 		 */
-		BUG();
+		if (trace->nr_entries < trace->max_entries)
+			trace->entries[trace->nr_entries++] = ULONG_MAX;
+		return;
 #else
 		data.no_sched_functions = 1;
 		frame.fp = thread_saved_fp(tsk);
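
Instead of hitting BUG() when asked to unwind a task that might be live on another CPU, the trace is now terminated with ULONG_MAX, the conventional end-of-trace marker that stack_trace consumers already recognize. From the caller's side, as a sketch:

	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};

	/* on SMP, a remote, possibly-running task now yields only the terminator */
	save_stack_trace_tsk(tsk, &trace);
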
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index f1e2eb1..3d76bf2 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -29,6 +29,7 @@
 
 #include <asm/leds.h>
 #include <asm/thread_info.h>
+#include <asm/sched_clock.h>
 #include <asm/stacktrace.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
@@ -163,5 +164,8 @@
 {
 	system_timer = machine_desc->timer;
 	system_timer->init();
+#ifdef CONFIG_HAVE_SCHED_CLOCK
+	sched_clock_postinit();
+#endif
 }
 
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
index 8d6a876..3c9a05c 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay.S
@@ -25,11 +25,15 @@
 		ldr	r2, .LC1
 		mul	r0, r2, r0
 ENTRY(__const_udelay)				@ 0 <= r0 <= 0x7fffff06
+		mov	r1, #-1
 		ldr	r2, .LC0
 		ldr	r2, [r2]		@ max = 0x01ffffff
+		add	r0, r0, r1, lsr #32-14
 		mov	r0, r0, lsr #14		@ max = 0x0001ffff
+		add	r2, r2, r1, lsr #32-10
 		mov	r2, r2, lsr #10		@ max = 0x00007fff
 		mul	r0, r2, r0		@ max = 2^32-1
+		add	r0, r0, r1, lsr #32-6
 		movs	r0, r0, lsr #6
 		moveq	pc, lr
 
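r1 is preloaded with ~0, so "r1, lsr #32-N" is a mask of N ones, i.e. 2^N - 1; adding it in before each "lsr #N" turns the truncating shifts into round-up shifts, so the computed loop count can no longer undershoot the requested delay. The same trick in C, as a sketch:

	/* divide by 2^n, rounding up instead of truncating */
	static inline unsigned int shr_round_up(unsigned int x, unsigned int n)
	{
		return (x + ((1u << n) - 1)) >> n;
	}
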
diff --git a/arch/arm/mach-aaec2000/core.c b/arch/arm/mach-aaec2000/core.c
index 3ef6833..f8465bd 100644
--- a/arch/arm/mach-aaec2000/core.c
+++ b/arch/arm/mach-aaec2000/core.c
@@ -68,25 +68,25 @@
 /*
  * Interrupt handling routines
  */
-static void aaec2000_int_ack(unsigned int irq)
+static void aaec2000_int_ack(struct irq_data *d)
 {
-	IRQ_INTSR = 1 << irq;
+	IRQ_INTSR = 1 << d->irq;
 }
 
-static void aaec2000_int_mask(unsigned int irq)
+static void aaec2000_int_mask(struct irq_data *d)
 {
-	IRQ_INTENC |= (1 << irq);
+	IRQ_INTENC |= (1 << d->irq);
 }
 
-static void aaec2000_int_unmask(unsigned int irq)
+static void aaec2000_int_unmask(struct irq_data *d)
 {
-	IRQ_INTENS |= (1 << irq);
+	IRQ_INTENS |= (1 << d->irq);
 }
 
 static struct irq_chip aaec2000_irq_chip = {
-	.ack	= aaec2000_int_ack,
-	.mask	= aaec2000_int_mask,
-	.unmask	= aaec2000_int_unmask,
+	.irq_ack	= aaec2000_int_ack,
+	.irq_mask	= aaec2000_int_mask,
+	.irq_unmask	= aaec2000_int_unmask,
 };
 
 void __init aaec2000_init_irq(void)
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index c015b68..1939023 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -362,6 +362,12 @@
 	  Select this if you are using Eukrea Electromatique's
 	  CPU9G20 Board <http://www.eukrea.com/>
 
+config MACH_ACMENETUSFOXG20
+	bool "Acme Systems srl FOX Board G20"
+	help
+	  Select this if you are using Acme Systems
+	  FOX Board G20 <http://www.acmesystems.it>
+
 config MACH_PORTUXG20
 	bool "taskit PortuxG20"
 	help
@@ -381,6 +387,13 @@
 	  Select this if you are using taskit's Stamp9G20 CPU module on this
 	  carrier board, being the decentralized unit of a building automation
 	  system; featuring nvram, eth-switch, iso-rs485, display, io
+
+config MACH_GSIA18S
+	bool "GS_IA18_S board"
+	help
+	  This enables support for the GS_IA18_S board
+	  produced by GeoSIG Ltd company. This is an internet accelerograph.
+	  <http://www.geosig.com>
 endif
 
 if (ARCH_AT91SAM9260 || ARCH_AT91SAM9G20)
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index d13add7..a83835e 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -63,9 +63,11 @@
 # AT91SAM9G20 board-specific support
 obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
 obj-$(CONFIG_MACH_CPU9G20)	+= board-cpu9krea.o
+obj-$(CONFIG_MACH_ACMENETUSFOXG20) += board-foxg20.o
 obj-$(CONFIG_MACH_STAMP9G20)	+= board-stamp9g20.o
 obj-$(CONFIG_MACH_PORTUXG20)	+= board-stamp9g20.o
 obj-$(CONFIG_MACH_PCONTROL_G20)	+= board-pcontrol-g20.o board-stamp9g20.o
+obj-$(CONFIG_MACH_GSIA18S)	+= board-gsia18s.o board-stamp9g20.o
 
 # AT91SAM9260/AT91SAM9G20 board-specific support
 obj-$(CONFIG_MACH_SNAPPER_9260)	+= board-snapper9260.o
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
new file mode 100644
index 0000000..dfc7dfe
--- /dev/null
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -0,0 +1,274 @@
+/*
+ *  Copyright (C) 2005 SAN People
+ *  Copyright (C) 2008 Atmel
+ *  Copyright (C) 2010 Lee McLoughlin - lee@lmmrtech.com
+ *  Copyright (C) 2010 Sergio Tanzilli - tanzilli@acmesystems.it
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at73c213.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/clk.h>
+#include <linux/w1-gpio.h>
+
+#include <mach/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+/*
+ * The FOX Board G20 hardware comes as the "Netus G20" board with
+ * just the cpu, ram, dataflash and two header connectors.
+ * This is plugged into the FOX Board which provides the ethernet,
+ * usb, rtc, leds, switch, ...
+ *
+ * For more info visit: http://www.acmesystems.it/foxg20
+ */
+
+
+static void __init foxg20_map_io(void)
+{
+	/* Initialize processor: 18.432 MHz crystal */
+	at91sam9260_initialize(18432000);
+
+	/* DBGU on ttyS0. (Rx & Tx only) */
+	at91_register_uart(0, 0, 0);
+
+	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+	at91_register_uart(AT91SAM9260_ID_US0, 1,
+				ATMEL_UART_CTS
+				| ATMEL_UART_RTS
+				| ATMEL_UART_DTR
+				| ATMEL_UART_DSR
+				| ATMEL_UART_DCD
+				| ATMEL_UART_RI);
+
+	/* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */
+	at91_register_uart(AT91SAM9260_ID_US1, 2,
+		ATMEL_UART_CTS
+		| ATMEL_UART_RTS);
+
+	/* USART2 on ttyS3. (Rx & Tx only) */
+	at91_register_uart(AT91SAM9260_ID_US2, 3, 0);
+
+	/* USART3 on ttyS4. (Rx, Tx, RTS, CTS) */
+	at91_register_uart(AT91SAM9260_ID_US3, 4,
+		ATMEL_UART_CTS
+		| ATMEL_UART_RTS);
+
+	/* USART4 on ttyS5. (Rx & Tx only) */
+	at91_register_uart(AT91SAM9260_ID_US4, 5, 0);
+
+	/* USART5 on ttyS6. (Rx & Tx only) */
+	at91_register_uart(AT91SAM9260_ID_US5, 6, 0);
+
+	/* set serial console to ttyS0 (ie, DBGU) */
+	at91_set_serial_console(0);
+
+	/* Set the internal pull-up resistor on DRXD */
+	at91_set_A_periph(AT91_PIN_PB14, 1);
+
+}
+
+static void __init foxg20_init_irq(void)
+{
+	at91sam9260_init_interrupts(NULL);
+}
+
+
+/*
+ * USB Host port
+ */
+static struct at91_usbh_data __initdata foxg20_usbh_data = {
+	.ports		= 2,
+};
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata foxg20_udc_data = {
+	.vbus_pin	= AT91_PIN_PC6,
+	.pullup_pin	= 0,		/* pull-up driven by UDC */
+};
+
+
+/*
+ * SPI devices.
+ */
+static struct spi_board_info foxg20_spi_devices[] = {
+#if !defined(CONFIG_MMC_AT91)
+	{
+		.modalias	= "mtd_dataflash",
+		.chip_select	= 1,
+		.max_speed_hz	= 15 * 1000 * 1000,
+		.bus_num	= 0,
+	},
+#endif
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata foxg20_macb_data = {
+	.phy_irq_pin	= AT91_PIN_PA7,
+	.is_rmii	= 1,
+};
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata foxg20_mmc_data = {
+	.slot_b		= 1,
+	.wire4		= 1,
+};
+
+
+/*
+ * LEDs
+ */
+static struct gpio_led foxg20_leds[] = {
+	{	/* user led, red */
+		.name			= "user_led",
+		.gpio			= AT91_PIN_PC7,
+		.active_low		= 0,
+		.default_trigger	= "heartbeat",
+	},
+};
+
+
+/*
+ * GPIO Buttons
+ */
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button foxg20_buttons[] = {
+	{
+		.gpio		= AT91_PIN_PC4,
+		.code		= BTN_1,
+		.desc		= "Button 1",
+		.active_low	= 1,
+		.wakeup		= 1,
+	},
+};
+
+static struct gpio_keys_platform_data foxg20_button_data = {
+	.buttons	= foxg20_buttons,
+	.nbuttons	= ARRAY_SIZE(foxg20_buttons),
+};
+
+static struct platform_device foxg20_button_device = {
+	.name		= "gpio-keys",
+	.id		= -1,
+	.num_resources	= 0,
+	.dev		= {
+		.platform_data	= &foxg20_button_data,
+	}
+};
+
+static void __init foxg20_add_device_buttons(void)
+{
+	at91_set_gpio_input(AT91_PIN_PC4, 1);	/* btn1 */
+	at91_set_deglitch(AT91_PIN_PC4, 1);
+
+	platform_device_register(&foxg20_button_device);
+}
+#else
+static void __init foxg20_add_device_buttons(void) {}
+#endif
+
+
+#if defined(CONFIG_W1_MASTER_GPIO) || defined(CONFIG_W1_MASTER_GPIO_MODULE)
+static struct w1_gpio_platform_data w1_gpio_pdata = {
+	/* If you choose to use a pin other than PB16 it needs to be 3.3V */
+	.pin		= AT91_PIN_PB16,
+	.is_open_drain  = 1,
+};
+
+static struct platform_device w1_device = {
+	.name			= "w1-gpio",
+	.id			= -1,
+	.dev.platform_data	= &w1_gpio_pdata,
+};
+
+static void __init at91_add_device_w1(void)
+{
+	at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
+	at91_set_multi_drive(w1_gpio_pdata.pin, 1);
+	platform_device_register(&w1_device);
+}
+
+#endif
+
+
+static struct i2c_board_info __initdata foxg20_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("24c512", 0x50),
+	},
+};
+
+
+static void __init foxg20_board_init(void)
+{
+	/* Serial */
+	at91_add_device_serial();
+	/* USB Host */
+	at91_add_device_usbh(&foxg20_usbh_data);
+	/* USB Device */
+	at91_add_device_udc(&foxg20_udc_data);
+	/* SPI */
+	at91_add_device_spi(foxg20_spi_devices, ARRAY_SIZE(foxg20_spi_devices));
+	/* Ethernet */
+	at91_add_device_eth(&foxg20_macb_data);
+	/* MMC */
+	at91_add_device_mmc(0, &foxg20_mmc_data);
+	/* I2C */
+	at91_add_device_i2c(foxg20_i2c_devices, ARRAY_SIZE(foxg20_i2c_devices));
+	/* LEDs */
+	at91_gpio_leds(foxg20_leds, ARRAY_SIZE(foxg20_leds));
+	/* Push Buttons */
+	foxg20_add_device_buttons();
+#if defined(CONFIG_W1_MASTER_GPIO) || defined(CONFIG_W1_MASTER_GPIO_MODULE)
+	at91_add_device_w1();
+#endif
+}
+
+MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20")
+	/* Maintainer: Sergio Tanzilli */
+	.boot_params	= AT91_SDRAM_BASE + 0x100,
+	.timer		= &at91sam926x_timer,
+	.map_io		= foxg20_map_io,
+	.init_irq	= foxg20_init_irq,
+	.init_machine	= foxg20_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
new file mode 100644
index 0000000..bc28136
--- /dev/null
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -0,0 +1,584 @@
+/*
+ *  Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de>
+ *                     taskit GmbH
+ *                2010 Igor Plyatov <plyatov@gmail.com>
+ *                     GeoSIG Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/w1-gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pcf857x.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+#include <mach/gsia18s.h>
+#include <mach/stamp9g20.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+static void __init gsia18s_map_io(void)
+{
+	stamp9g20_map_io();
+
+	/*
+	 * USART0 on ttyS1 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI).
+	 * Used for Internal Analog Modem.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US0, 1,
+				ATMEL_UART_CTS | ATMEL_UART_RTS |
+				ATMEL_UART_DTR | ATMEL_UART_DSR |
+				ATMEL_UART_DCD | ATMEL_UART_RI);
+	/*
+	 * USART1 on ttyS2 (Rx, Tx, CTS, RTS).
+	 * Used for GPS or WiFi or Data stream.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US1, 2,
+				ATMEL_UART_CTS | ATMEL_UART_RTS);
+	/*
+	 * USART2 on ttyS3 (Rx, Tx, CTS, RTS).
+	 * Used for External Modem.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US2, 3,
+				ATMEL_UART_CTS | ATMEL_UART_RTS);
+	/*
+	 * USART3 on ttyS4 (Rx, Tx, RTS).
+	 * Used for RS-485.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US3, 4, ATMEL_UART_RTS);
+
+	/*
+	 * USART4 on ttyS5 (Rx, Tx).
+	 * Used for TRX433 Radio Module.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US4, 5, 0);
+}
+
+static void __init init_irq(void)
+{
+	at91sam9260_init_interrupts(NULL);
+}
+
+/*
+ * Two USB Host ports
+ */
+static struct at91_usbh_data __initdata usbh_data = {
+	.ports		= 2,
+};
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata udc_data = {
+	.vbus_pin	= AT91_PIN_PA22,
+	.pullup_pin	= 0,		/* pull-up driven by UDC */
+};
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata macb_data = {
+	.phy_irq_pin	= AT91_PIN_PA28,
+	.is_rmii	= 1,
+};
+
+/*
+ * LEDs and GPOs
+ */
+static struct gpio_led gpio_leds[] = {
+	{
+		.name			= "gpo:spi1reset",
+		.gpio			= AT91_PIN_PC1,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "gpo:trig_net_out",
+		.gpio			= AT91_PIN_PB20,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "gpo:trig_net_dir",
+		.gpio			= AT91_PIN_PB19,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "gpo:charge_dis",
+		.gpio			= AT91_PIN_PC2,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "led:event",
+		.gpio			= AT91_PIN_PB17,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "led:lan",
+		.gpio			= AT91_PIN_PB18,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "led:error",
+		.gpio			= AT91_PIN_PB16,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	}
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+	.leds		= gpio_leds,
+	.num_leds	= ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds = {
+	.name	= "leds-gpio",
+	.id	= 0,
+	.dev	= {
+		.platform_data	= &gpio_led_info,
+	}
+};
+
+static void __init gsia18s_leds_init(void)
+{
+	platform_device_register(&leds);
+}
+
+/* PCF8574 0x20 GPIO - U1 on the GS_IA18-CB_V3 board */
+static struct gpio_led pcf_gpio_leds1[] = {
+	{ /* bit 0 */
+		.name			= "gpo:hdc_power",
+		.gpio			= PCF_GPIO_HDC_POWER,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 1 */
+		.name			= "gpo:wifi_setup",
+		.gpio			= PCF_GPIO_WIFI_SETUP,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 2 */
+		.name			= "gpo:wifi_enable",
+		.gpio			= PCF_GPIO_WIFI_ENABLE,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 3	*/
+		.name			= "gpo:wifi_reset",
+		.gpio			= PCF_GPIO_WIFI_RESET,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	},
+	/* bit 4 used as GPI	*/
+	{ /* bit 5 */
+		.name			= "gpo:gps_setup",
+		.gpio			= PCF_GPIO_GPS_SETUP,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 6 */
+		.name			= "gpo:gps_standby",
+		.gpio			= PCF_GPIO_GPS_STANDBY,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	},
+	{ /* bit 7 */
+		.name			= "gpo:gps_power",
+		.gpio			= PCF_GPIO_GPS_POWER,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	}
+};
+
+static struct gpio_led_platform_data pcf_gpio_led_info1 = {
+	.leds		= pcf_gpio_leds1,
+	.num_leds	= ARRAY_SIZE(pcf_gpio_leds1),
+};
+
+static struct platform_device pcf_leds1 = {
+	.name	= "leds-gpio", /* GS_IA18-CB_board */
+	.id	= 1,
+	.dev	= {
+		.platform_data	= &pcf_gpio_led_info1,
+	}
+};
+
+/* PCF8574 0x22 GPIO - U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
+static struct gpio_led pcf_gpio_leds2[] = {
+	{ /* bit 0 */
+		.name			= "gpo:alarm_1",
+		.gpio			= PCF_GPIO_ALARM1,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 1 */
+		.name			= "gpo:alarm_2",
+		.gpio			= PCF_GPIO_ALARM2,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 2 */
+		.name			= "gpo:alarm_3",
+		.gpio			= PCF_GPIO_ALARM3,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 3 */
+		.name			= "gpo:alarm_4",
+		.gpio			= PCF_GPIO_ALARM4,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	/* bits 4, 5, 6 not used */
+	{ /* bit 7 */
+		.name			= "gpo:alarm_v_relay_on",
+		.gpio			= PCF_GPIO_ALARM_V_RELAY_ON,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+};
+
+static struct gpio_led_platform_data pcf_gpio_led_info2 = {
+	.leds		= pcf_gpio_leds2,
+	.num_leds	= ARRAY_SIZE(pcf_gpio_leds2),
+};
+
+static struct platform_device pcf_leds2 = {
+	.name	= "leds-gpio",
+	.id	= 2,
+	.dev	= {
+		.platform_data	= &pcf_gpio_led_info2,
+	}
+};
+
+/* PCF8574 0x24 GPIO U1 on the GS_2G-OPT23-A_V0 board (Modem) */
+static struct gpio_led pcf_gpio_leds3[] = {
+	{ /* bit 0 */
+		.name			= "gpo:modem_power",
+		.gpio			= PCF_GPIO_MODEM_POWER,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+		/* bits 1 and 2 not used */
+	{ /* bit 3 */
+		.name			= "gpo:modem_reset",
+		.gpio			= PCF_GPIO_MODEM_RESET,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	},
+		/* bits 4, 5 and 6 not used */
+	{ /* bit 7 */
+		.name			= "gpo:trx_reset",
+		.gpio			= PCF_GPIO_TRX_RESET,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	}
+};
+
+static struct gpio_led_platform_data pcf_gpio_led_info3 = {
+	.leds		= pcf_gpio_leds3,
+	.num_leds	= ARRAY_SIZE(pcf_gpio_leds3),
+};
+
+static struct platform_device pcf_leds3 = {
+	.name	= "leds-gpio",
+	.id	= 3,
+	.dev	= {
+		.platform_data	= &pcf_gpio_led_info3,
+	}
+};
+
+static void __init gsia18s_pcf_leds_init(void)
+{
+	platform_device_register(&pcf_leds1);
+	platform_device_register(&pcf_leds2);
+	platform_device_register(&pcf_leds3);
+}
+
+/*
+ * SPI busses.
+ */
+static struct spi_board_info gsia18s_spi_devices[] = {
+	{ /* User accessible spi0, cs0 used for communication with MSP RTC */
+		.modalias	= "spidev",
+		.bus_num	= 0,
+		.chip_select	= 0,
+		.max_speed_hz	= 580000,
+		.mode		= SPI_MODE_1,
+	},
+	{ /* User accessible spi1, cs0 used for communication with int. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 0,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	},
+	{ /* User accessible spi1, cs1 used for communication with ext. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 1,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	},
+	{ /* User accessible spi1, cs2 used for communication with ext. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 2,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	},
+	{ /* User accessible spi1, cs3 used for communication with ext. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 3,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	}
+};
+
+/*
+ * GPI Buttons
+ */
+static struct gpio_keys_button buttons[] = {
+	{
+		.gpio		= GPIO_TRIG_NET_IN,
+		.code		= BTN_1,
+		.desc		= "TRIG_NET_IN",
+		.type		= EV_KEY,
+		.active_low	= 0,
+		.wakeup		= 1,
+	},
+	{ /* SW80 on the GS_IA18_S-MN board*/
+		.gpio		= GPIO_CARD_UNMOUNT_0,
+		.code		= BTN_2,
+		.desc		= "Card umount 0",
+		.type		= EV_KEY,
+		.active_low	= 1,
+		.wakeup		= 1,
+	},
+	{ /* SW79 on the GS_IA18_S-MN board*/
+		.gpio		= GPIO_CARD_UNMOUNT_1,
+		.code		= BTN_3,
+		.desc		= "Card umount 1",
+		.type		= EV_KEY,
+		.active_low	= 1,
+		.wakeup		= 1,
+	},
+	{ /* SW280 on the GS_IA18-CB board*/
+		.gpio		= GPIO_KEY_POWER,
+		.code		= KEY_POWER,
+		.desc		= "Power Off Button",
+		.type		= EV_KEY,
+		.active_low	= 0,
+		.wakeup		= 1,
+	}
+};
+
+static struct gpio_keys_platform_data button_data = {
+	.buttons	= buttons,
+	.nbuttons	= ARRAY_SIZE(buttons),
+};
+
+static struct platform_device button_device = {
+	.name		= "gpio-keys",
+	.id		= -1,
+	.num_resources	= 0,
+	.dev		= {
+		.platform_data	= &button_data,
+	}
+};
+
+static void __init gsia18s_add_device_buttons(void)
+{
+	at91_set_gpio_input(GPIO_TRIG_NET_IN, 1);
+	at91_set_deglitch(GPIO_TRIG_NET_IN, 1);
+	at91_set_gpio_input(GPIO_CARD_UNMOUNT_0, 1);
+	at91_set_deglitch(GPIO_CARD_UNMOUNT_0, 1);
+	at91_set_gpio_input(GPIO_CARD_UNMOUNT_1, 1);
+	at91_set_deglitch(GPIO_CARD_UNMOUNT_1, 1);
+	at91_set_gpio_input(GPIO_KEY_POWER, 0);
+	at91_set_deglitch(GPIO_KEY_POWER, 1);
+
+	platform_device_register(&button_device);
+}
+
+/*
+ * I2C
+ */
+static int pcf8574x_0x20_setup(struct i2c_client *client, int gpio,
+				unsigned int ngpio, void *context)
+{
+	int status;
+
+	status = gpio_request(gpio + PCF_GPIO_ETH_DETECT, "eth_det");
+	if (status < 0) {
+		pr_err("error: can't request GPIO%d\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+	status = gpio_direction_input(gpio + PCF_GPIO_ETH_DETECT);
+	if (status < 0) {
+		pr_err("error: can't setup GPIO%d as input\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+	status = gpio_export(gpio + PCF_GPIO_ETH_DETECT, false);
+	if (status < 0) {
+		pr_err("error: can't export GPIO%d\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+	status = gpio_sysfs_set_active_low(gpio + PCF_GPIO_ETH_DETECT, 1);
+	if (status < 0) {
+		pr_err("error: gpio_sysfs_set active_low(GPIO%d, 1)\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+
+	return 0;
+}
+
+static int pcf8574x_0x20_teardown(struct i2c_client *client, int gpio,
+					unsigned ngpio, void *context)
+{
+	gpio_free(gpio + PCF_GPIO_ETH_DETECT);
+	return 0;
+}
+
+static struct pcf857x_platform_data pcf20_pdata = {
+	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE0,
+	.n_latch	= (1 << 4),
+	.setup		= pcf8574x_0x20_setup,
+	.teardown	= pcf8574x_0x20_teardown,
+};
+
+static struct pcf857x_platform_data pcf22_pdata = {
+	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE1,
+};
+
+static struct pcf857x_platform_data pcf24_pdata = {
+	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE2,
+};
+
+static struct i2c_board_info __initdata gsia18s_i2c_devices[] = {
+	{ /* U1 on the GS_IA18-CB_V3 board */
+		I2C_BOARD_INFO("pcf8574", 0x20),
+		.platform_data = &pcf20_pdata,
+	},
+	{ /* U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
+		I2C_BOARD_INFO("pcf8574", 0x22),
+		.platform_data = &pcf22_pdata,
+	},
+	{ /* U1 on the GS_2G-OPT23-A_V0 board (Modem) */
+		I2C_BOARD_INFO("pcf8574", 0x24),
+		.platform_data = &pcf24_pdata,
+	},
+	{ /* U161 on the GS_IA18_S-MN board */
+		I2C_BOARD_INFO("24c1024", 0x50),
+	},
+	{ /* U162 on the GS_IA18_S-MN board */
+		I2C_BOARD_INFO("24c01", 0x53),
+	},
+};
+
+/*
+ * Compact Flash
+ */
+static struct at91_cf_data __initdata gsia18s_cf1_data = {
+	.irq_pin	= AT91_PIN_PA27,
+	.det_pin	= AT91_PIN_PB30,
+	.rst_pin	= AT91_PIN_PB31,
+	.chipselect	= 5,
+	.flags		= AT91_CF_TRUE_IDE,
+};
+
+/* Power Off by RTC */
+static void gsia18s_power_off(void)
+{
+	pr_notice("Power supply will be switched off automatically now or after 60 seconds without ArmDAS.\n");
+	at91_set_gpio_output(AT91_PIN_PA25, 1);
+	/* Spin to death... */
+	while (1)
+		;
+}
+
+static int __init gsia18s_power_off_init(void)
+{
+	pm_power_off = gsia18s_power_off;
+	return 0;
+}
+
+/* ---------------------------------------------------------------------------*/
+
+static void __init gsia18s_board_init(void)
+{
+	stamp9g20_board_init();
+	at91_add_device_usbh(&usbh_data);
+	at91_add_device_udc(&udc_data);
+	at91_add_device_eth(&macb_data);
+	gsia18s_leds_init();
+	gsia18s_pcf_leds_init();
+	gsia18s_add_device_buttons();
+	at91_add_device_i2c(gsia18s_i2c_devices,
+				ARRAY_SIZE(gsia18s_i2c_devices));
+	at91_add_device_cf(&gsia18s_cf1_data);
+	at91_add_device_spi(gsia18s_spi_devices,
+				ARRAY_SIZE(gsia18s_spi_devices));
+	gsia18s_power_off_init();
+}
+
+MACHINE_START(GSIA18S, "GS_IA18_S")
+	.boot_params	= AT91_SDRAM_BASE + 0x100,
+	.timer		= &at91sam926x_timer,
+	.map_io		= gsia18s_map_io,
+	.init_irq	= init_irq,
+	.init_machine	= gsia18s_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 86ff4b5..6c999db 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -37,7 +37,6 @@
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
 
-#include <mach/hardware.h>
 #include <mach/board.h>
 #include <mach/gpio.h>
 #include <mach/at91sam9_smc.h>
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index ae4772e..af818a2 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -274,10 +274,10 @@
 static u32 wakeups[MAX_GPIO_BANKS];
 static u32 backups[MAX_GPIO_BANKS];
 
-static int gpio_irq_set_wake(unsigned pin, unsigned state)
+static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
 {
-	unsigned	mask = pin_to_mask(pin);
-	unsigned	bank = (pin - PIN_BASE) / 32;
+	unsigned	mask = pin_to_mask(d->irq);
+	unsigned	bank = (d->irq - PIN_BASE) / 32;
 
 	if (unlikely(bank >= MAX_GPIO_BANKS))
 		return -EINVAL;
@@ -344,25 +344,25 @@
  * IRQ0..IRQ6 should be configurable, e.g. level vs edge triggering.
  */
 
-static void gpio_irq_mask(unsigned pin)
+static void gpio_irq_mask(struct irq_data *d)
 {
-	void __iomem	*pio = pin_to_controller(pin);
-	unsigned	mask = pin_to_mask(pin);
+	void __iomem	*pio = pin_to_controller(d->irq);
+	unsigned	mask = pin_to_mask(d->irq);
 
 	if (pio)
 		__raw_writel(mask, pio + PIO_IDR);
 }
 
-static void gpio_irq_unmask(unsigned pin)
+static void gpio_irq_unmask(struct irq_data *d)
 {
-	void __iomem	*pio = pin_to_controller(pin);
-	unsigned	mask = pin_to_mask(pin);
+	void __iomem	*pio = pin_to_controller(d->irq);
+	unsigned	mask = pin_to_mask(d->irq);
 
 	if (pio)
 		__raw_writel(mask, pio + PIO_IER);
 }
 
-static int gpio_irq_type(unsigned pin, unsigned type)
+static int gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	switch (type) {
 	case IRQ_TYPE_NONE:
@@ -375,10 +375,10 @@
 
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
-	.mask		= gpio_irq_mask,
-	.unmask		= gpio_irq_unmask,
-	.set_type	= gpio_irq_type,
-	.set_wake	= gpio_irq_set_wake,
+	.irq_mask	= gpio_irq_mask,
+	.irq_unmask	= gpio_irq_unmask,
+	.irq_set_type	= gpio_irq_type,
+	.irq_set_wake	= gpio_irq_set_wake,
 };
 
 static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -393,7 +393,7 @@
 	pio = at91_gpio->regbase;
 
 	/* temporarily mask (level sensitive) parent IRQ */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	for (;;) {
 		/* Reading ISR acks pending (edge triggered) GPIO interrupts.
 		 * When there are none pending, we're finished unless we need
@@ -419,7 +419,7 @@
 					 * another IRQ must be generated before it actually gets
 					 * here to be disabled on the GPIO controller.
 					 */
-					gpio_irq_mask(pin);
+					gpio_irq_mask(irq_get_irq_data(pin));
 				}
 				else
 					generic_handle_irq(pin);
@@ -429,7 +429,7 @@
 			isr >>= 1;
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	/* now it may re-trigger */
 }
 
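The chained demux handler now reaches its parent chip through desc->irq_data instead of calling desc->chip methods with the IRQ number; the ecard and davinci gpio hunks apply the same idiom. The general shape of the converted handler, as a sketch with hypothetical names:

	static void bank_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = desc->irq_data.chip;

		chip->irq_ack(&desc->irq_data);		/* mask/ack the (level) parent IRQ */
		/* ... read bank status, generic_handle_irq() each pending child ... */
		chip->irq_unmask(&desc->irq_data);	/* the parent may now re-trigger */
	}
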
diff --git a/arch/arm/mach-at91/include/mach/gsia18s.h b/arch/arm/mach-at91/include/mach/gsia18s.h
new file mode 100644
index 0000000..307c194
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/gsia18s.h
@@ -0,0 +1,33 @@
+/* Buttons */
+#define GPIO_TRIG_NET_IN		AT91_PIN_PB21
+#define GPIO_CARD_UNMOUNT_0		AT91_PIN_PB13
+#define GPIO_CARD_UNMOUNT_1		AT91_PIN_PB12
+#define GPIO_KEY_POWER			AT91_PIN_PA25
+
+/* PCF8574 0x20 GPIO - U1 on the GS_IA18-CB_V3 board */
+#define GS_IA18_S_PCF_GPIO_BASE0	NR_BUILTIN_GPIO
+#define PCF_GPIO_HDC_POWER		(GS_IA18_S_PCF_GPIO_BASE0 + 0)
+#define PCF_GPIO_WIFI_SETUP		(GS_IA18_S_PCF_GPIO_BASE0 + 1)
+#define PCF_GPIO_WIFI_ENABLE		(GS_IA18_S_PCF_GPIO_BASE0 + 2)
+#define PCF_GPIO_WIFI_RESET		(GS_IA18_S_PCF_GPIO_BASE0 + 3)
+#define PCF_GPIO_ETH_DETECT		4 /* this is a GPI */
+#define PCF_GPIO_GPS_SETUP		(GS_IA18_S_PCF_GPIO_BASE0 + 5)
+#define PCF_GPIO_GPS_STANDBY		(GS_IA18_S_PCF_GPIO_BASE0 + 6)
+#define PCF_GPIO_GPS_POWER		(GS_IA18_S_PCF_GPIO_BASE0 + 7)
+
+/* PCF8574 0x22 GPIO - U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
+#define GS_IA18_S_PCF_GPIO_BASE1	(GS_IA18_S_PCF_GPIO_BASE0 + 8)
+#define PCF_GPIO_ALARM1			(GS_IA18_S_PCF_GPIO_BASE1 + 0)
+#define PCF_GPIO_ALARM2			(GS_IA18_S_PCF_GPIO_BASE1 + 1)
+#define PCF_GPIO_ALARM3			(GS_IA18_S_PCF_GPIO_BASE1 + 2)
+#define PCF_GPIO_ALARM4			(GS_IA18_S_PCF_GPIO_BASE1 + 3)
+/* bits 4, 5, 6 not used */
+#define PCF_GPIO_ALARM_V_RELAY_ON	(GS_IA18_S_PCF_GPIO_BASE1 + 7)
+
+/* PCF8574 0x24 GPIO U1 on the GS_2G-OPT23-A_V0 board (Modem) */
+#define GS_IA18_S_PCF_GPIO_BASE2	(GS_IA18_S_PCF_GPIO_BASE1 + 8)
+#define PCF_GPIO_MODEM_POWER		(GS_IA18_S_PCF_GPIO_BASE2 + 0)
+#define PCF_GPIO_MODEM_RESET		(GS_IA18_S_PCF_GPIO_BASE2 + 3)
+/* bits 1, 2, 4, 5 not used */
+#define PCF_GPIO_TRX_RESET		(GS_IA18_S_PCF_GPIO_BASE2 + 6)
+/* bit 7 not used */
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index da3494a..b56d6b3a 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -34,23 +34,23 @@
 #include <asm/mach/map.h>
 
 
-static void at91_aic_mask_irq(unsigned int irq)
+static void at91_aic_mask_irq(struct irq_data *d)
 {
 	/* Disable interrupt on AIC */
-	at91_sys_write(AT91_AIC_IDCR, 1 << irq);
+	at91_sys_write(AT91_AIC_IDCR, 1 << d->irq);
 }
 
-static void at91_aic_unmask_irq(unsigned int irq)
+static void at91_aic_unmask_irq(struct irq_data *d)
 {
 	/* Enable interrupt on AIC */
-	at91_sys_write(AT91_AIC_IECR, 1 << irq);
+	at91_sys_write(AT91_AIC_IECR, 1 << d->irq);
 }
 
 unsigned int at91_extern_irq;
 
 #define is_extern_irq(irq) ((1 << (irq)) & at91_extern_irq)
 
-static int at91_aic_set_type(unsigned irq, unsigned type)
+static int at91_aic_set_type(struct irq_data *d, unsigned type)
 {
 	unsigned int smr, srctype;
 
@@ -62,13 +62,13 @@
 		srctype = AT91_AIC_SRCTYPE_RISING;
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		if ((irq == AT91_ID_FIQ) || is_extern_irq(irq))		/* only supported on external interrupts */
+		if ((d->irq == AT91_ID_FIQ) || is_extern_irq(d->irq))		/* only supported on external interrupts */
 			srctype = AT91_AIC_SRCTYPE_LOW;
 		else
 			return -EINVAL;
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		if ((irq == AT91_ID_FIQ) || is_extern_irq(irq))		/* only supported on external interrupts */
+		if ((d->irq == AT91_ID_FIQ) || is_extern_irq(d->irq))		/* only supported on external interrupts */
 			srctype = AT91_AIC_SRCTYPE_FALLING;
 		else
 			return -EINVAL;
@@ -77,8 +77,8 @@
 		return -EINVAL;
 	}
 
-	smr = at91_sys_read(AT91_AIC_SMR(irq)) & ~AT91_AIC_SRCTYPE;
-	at91_sys_write(AT91_AIC_SMR(irq), smr | srctype);
+	smr = at91_sys_read(AT91_AIC_SMR(d->irq)) & ~AT91_AIC_SRCTYPE;
+	at91_sys_write(AT91_AIC_SMR(d->irq), smr | srctype);
 	return 0;
 }
 
@@ -87,15 +87,15 @@
 static u32 wakeups;
 static u32 backups;
 
-static int at91_aic_set_wake(unsigned irq, unsigned value)
+static int at91_aic_set_wake(struct irq_data *d, unsigned value)
 {
-	if (unlikely(irq >= 32))
+	if (unlikely(d->irq >= 32))
 		return -EINVAL;
 
 	if (value)
-		wakeups |= (1 << irq);
+		wakeups |= (1 << d->irq);
 	else
-		wakeups &= ~(1 << irq);
+		wakeups &= ~(1 << d->irq);
 
 	return 0;
 }
@@ -119,11 +119,11 @@
 
 static struct irq_chip at91_aic_chip = {
 	.name		= "AIC",
-	.ack		= at91_aic_mask_irq,
-	.mask		= at91_aic_mask_irq,
-	.unmask		= at91_aic_unmask_irq,
-	.set_type	= at91_aic_set_type,
-	.set_wake	= at91_aic_set_wake,
+	.irq_ack	= at91_aic_mask_irq,
+	.irq_mask	= at91_aic_mask_irq,
+	.irq_unmask	= at91_aic_unmask_irq,
+	.irq_set_type	= at91_aic_set_type,
+	.irq_set_wake	= at91_aic_set_wake,
 };
 
 /*
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c
index e315263..84dcda0 100644
--- a/arch/arm/mach-bcmring/irq.c
+++ b/arch/arm/mach-bcmring/irq.c
@@ -30,61 +30,61 @@
 #include <mach/csp/intcHw_reg.h>
 #include <mach/csp/mm_io.h>
 
-static void bcmring_mask_irq0(unsigned int irq)
+static void bcmring_mask_irq0(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC0_START),
+	writel(1 << (d->irq - IRQ_INTC0_START),
 	       MM_IO_BASE_INTC0 + INTCHW_INTENCLEAR);
 }
 
-static void bcmring_unmask_irq0(unsigned int irq)
+static void bcmring_unmask_irq0(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC0_START),
+	writel(1 << (d->irq - IRQ_INTC0_START),
 	       MM_IO_BASE_INTC0 + INTCHW_INTENABLE);
 }
 
-static void bcmring_mask_irq1(unsigned int irq)
+static void bcmring_mask_irq1(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC1_START),
+	writel(1 << (d->irq - IRQ_INTC1_START),
 	       MM_IO_BASE_INTC1 + INTCHW_INTENCLEAR);
 }
 
-static void bcmring_unmask_irq1(unsigned int irq)
+static void bcmring_unmask_irq1(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC1_START),
+	writel(1 << (d->irq - IRQ_INTC1_START),
 	       MM_IO_BASE_INTC1 + INTCHW_INTENABLE);
 }
 
-static void bcmring_mask_irq2(unsigned int irq)
+static void bcmring_mask_irq2(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_SINTC_START),
+	writel(1 << (d->irq - IRQ_SINTC_START),
 	       MM_IO_BASE_SINTC + INTCHW_INTENCLEAR);
 }
 
-static void bcmring_unmask_irq2(unsigned int irq)
+static void bcmring_unmask_irq2(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_SINTC_START),
+	writel(1 << (d->irq - IRQ_SINTC_START),
 	       MM_IO_BASE_SINTC + INTCHW_INTENABLE);
 }
 
 static struct irq_chip bcmring_irq0_chip = {
 	.name = "ARM-INTC0",
-	.ack = bcmring_mask_irq0,
-	.mask = bcmring_mask_irq0,	/* mask a specific interrupt, blocking its delivery. */
-	.unmask = bcmring_unmask_irq0,	/* unmaks an interrupt */
+	.irq_ack = bcmring_mask_irq0,
+	.irq_mask = bcmring_mask_irq0,	/* mask a specific interrupt, blocking its delivery. */
+	.irq_unmask = bcmring_unmask_irq0,	/* unmasks an interrupt */
 };
 
 static struct irq_chip bcmring_irq1_chip = {
 	.name = "ARM-INTC1",
-	.ack = bcmring_mask_irq1,
-	.mask = bcmring_mask_irq1,
-	.unmask = bcmring_unmask_irq1,
+	.irq_ack = bcmring_mask_irq1,
+	.irq_mask = bcmring_mask_irq1,
+	.irq_unmask = bcmring_unmask_irq1,
 };
 
 static struct irq_chip bcmring_irq2_chip = {
 	.name = "ARM-SINTC",
-	.ack = bcmring_mask_irq2,
-	.mask = bcmring_mask_irq2,
-	.unmask = bcmring_unmask_irq2,
+	.irq_ack = bcmring_mask_irq2,
+	.irq_mask = bcmring_mask_irq2,
+	.irq_unmask = bcmring_unmask_irq2,
 };
 
 static void vic_init(void __iomem *base, struct irq_chip *chip,
diff --git a/arch/arm/mach-clps711x/irq.c b/arch/arm/mach-clps711x/irq.c
index 9a12d85..86da7a1 100644
--- a/arch/arm/mach-clps711x/irq.c
+++ b/arch/arm/mach-clps711x/irq.c
@@ -27,24 +27,24 @@
 
 #include <asm/hardware/clps7111.h>
 
-static void int1_mask(unsigned int irq)
+static void int1_mask(struct irq_data *d)
 {
 	u32 intmr1;
 
 	intmr1 = clps_readl(INTMR1);
-	intmr1 &= ~(1 << irq);
+	intmr1 &= ~(1 << d->irq);
 	clps_writel(intmr1, INTMR1);
 }
 
-static void int1_ack(unsigned int irq)
+static void int1_ack(struct irq_data *d)
 {
 	u32 intmr1;
 
 	intmr1 = clps_readl(INTMR1);
-	intmr1 &= ~(1 << irq);
+	intmr1 &= ~(1 << d->irq);
 	clps_writel(intmr1, INTMR1);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_CSINT:  clps_writel(0, COEOI);  break;
 	case IRQ_TC1OI:  clps_writel(0, TC1EOI); break;
 	case IRQ_TC2OI:  clps_writel(0, TC2EOI); break;
@@ -54,56 +54,56 @@
 	}
 }
 
-static void int1_unmask(unsigned int irq)
+static void int1_unmask(struct irq_data *d)
 {
 	u32 intmr1;
 
 	intmr1 = clps_readl(INTMR1);
-	intmr1 |= 1 << irq;
+	intmr1 |= 1 << d->irq;
 	clps_writel(intmr1, INTMR1);
 }
 
 static struct irq_chip int1_chip = {
-	.ack	= int1_ack,
-	.mask	= int1_mask,
-	.unmask = int1_unmask,
+	.irq_ack	= int1_ack,
+	.irq_mask	= int1_mask,
+	.irq_unmask	= int1_unmask,
 };
 
-static void int2_mask(unsigned int irq)
+static void int2_mask(struct irq_data *d)
 {
 	u32 intmr2;
 
 	intmr2 = clps_readl(INTMR2);
-	intmr2 &= ~(1 << (irq - 16));
+	intmr2 &= ~(1 << (d->irq - 16));
 	clps_writel(intmr2, INTMR2);
 }
 
-static void int2_ack(unsigned int irq)
+static void int2_ack(struct irq_data *d)
 {
 	u32 intmr2;
 
 	intmr2 = clps_readl(INTMR2);
-	intmr2 &= ~(1 << (irq - 16));
+	intmr2 &= ~(1 << (d->irq - 16));
 	clps_writel(intmr2, INTMR2);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_KBDINT: clps_writel(0, KBDEOI); break;
 	}
 }
 
-static void int2_unmask(unsigned int irq)
+static void int2_unmask(struct irq_data *d)
 {
 	u32 intmr2;
 
 	intmr2 = clps_readl(INTMR2);
-	intmr2 |= 1 << (irq - 16);
+	intmr2 |= 1 << (d->irq - 16);
 	clps_writel(intmr2, INTMR2);
 }
 
 static struct irq_chip int2_chip = {
-	.ack	= int2_ack,
-	.mask	= int2_mask,
-	.unmask = int2_unmask,
+	.irq_ack	= int2_ack,
+	.irq_mask	= int2_mask,
+	.irq_unmask	= int2_unmask,
 };
 
 void __init clps711x_init_irq(void)
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index bb4c40e..9abc80a 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -26,30 +26,30 @@
 	__raw_writel(value, davinci_intc_base + offset);
 }
 
-static void cp_intc_ack_irq(unsigned int irq)
+static void cp_intc_ack_irq(struct irq_data *d)
 {
-	cp_intc_write(irq, CP_INTC_SYS_STAT_IDX_CLR);
+	cp_intc_write(d->irq, CP_INTC_SYS_STAT_IDX_CLR);
 }
 
 /* Disable interrupt */
-static void cp_intc_mask_irq(unsigned int irq)
+static void cp_intc_mask_irq(struct irq_data *d)
 {
 	/* XXX don't know why we need to disable nIRQ here... */
 	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR);
-	cp_intc_write(irq, CP_INTC_SYS_ENABLE_IDX_CLR);
+	cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_CLR);
 	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
 }
 
 /* Enable interrupt */
-static void cp_intc_unmask_irq(unsigned int irq)
+static void cp_intc_unmask_irq(struct irq_data *d)
 {
-	cp_intc_write(irq, CP_INTC_SYS_ENABLE_IDX_SET);
+	cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_SET);
 }
 
-static int cp_intc_set_irq_type(unsigned int irq, unsigned int flow_type)
+static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	unsigned reg		= BIT_WORD(irq);
-	unsigned mask		= BIT_MASK(irq);
+	unsigned reg		= BIT_WORD(d->irq);
+	unsigned mask		= BIT_MASK(d->irq);
 	unsigned polarity	= cp_intc_read(CP_INTC_SYS_POLARITY(reg));
 	unsigned type		= cp_intc_read(CP_INTC_SYS_TYPE(reg));
 
@@ -85,18 +85,18 @@
  * generic drivers which call {enable|disable}_irq_wake for
  * wake up interrupt sources (eg RTC on DA850).
  */
-static int cp_intc_set_wake(unsigned int irq, unsigned int on)
+static int cp_intc_set_wake(struct irq_data *d, unsigned int on)
 {
 	return 0;
 }
 
 static struct irq_chip cp_intc_irq_chip = {
 	.name		= "cp_intc",
-	.ack		= cp_intc_ack_irq,
-	.mask		= cp_intc_mask_irq,
-	.unmask		= cp_intc_unmask_irq,
-	.set_type	= cp_intc_set_irq_type,
-	.set_wake	= cp_intc_set_wake,
+	.irq_ack	= cp_intc_ack_irq,
+	.irq_mask	= cp_intc_mask_irq,
+	.irq_unmask	= cp_intc_unmask_irq,
+	.irq_set_type	= cp_intc_set_irq_type,
+	.irq_set_wake	= cp_intc_set_wake,
 };
 
 void __init cp_intc_init(void)
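
cp_intc_set_wake() is deliberately a stub that reports success: the controller needs no programming for wakeup, but returning 0 keeps enable_irq_wake()/disable_irq_wake() from failing in generic drivers such as the DA850 RTC. Driver-side usage, as a sketch with a hypothetical device:

	static int foo_irq;	/* hypothetical: saved at probe time */

	static int foo_suspend(struct device *dev)
	{
		if (device_may_wakeup(dev))
			enable_irq_wake(foo_irq);	/* succeeds because irq_set_wake returns 0 */
		return 0;
	}
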
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index bf0ff58..20d66e5 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -205,20 +205,20 @@
  * serve as EDMA event triggers.
  */
 
-static void gpio_irq_disable(unsigned irq)
+static void gpio_irq_disable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
 
 	__raw_writel(mask, &g->clr_falling);
 	__raw_writel(mask, &g->clr_rising);
 }
 
-static void gpio_irq_enable(unsigned irq)
+static void gpio_irq_enable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
-	unsigned status = irq_desc[irq].status;
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
+	unsigned status = irq_desc[d->irq].status;
 
 	status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
 	if (!status)
@@ -230,19 +230,19 @@
 		__raw_writel(mask, &g->set_rising);
 }
 
-static int gpio_irq_type(unsigned irq, unsigned trigger)
+static int gpio_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
 
 	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
 		return -EINVAL;
 
-	irq_desc[irq].status &= ~IRQ_TYPE_SENSE_MASK;
-	irq_desc[irq].status |= trigger;
+	irq_desc[d->irq].status &= ~IRQ_TYPE_SENSE_MASK;
+	irq_desc[d->irq].status |= trigger;
 
 	/* don't enable the IRQ if it's currently disabled */
-	if (irq_desc[irq].depth == 0) {
+	if (irq_desc[d->irq].depth == 0) {
 		__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
 			     ? &g->set_falling : &g->clr_falling);
 		__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
@@ -253,9 +253,9 @@
 
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
-	.enable		= gpio_irq_enable,
-	.disable	= gpio_irq_disable,
-	.set_type	= gpio_irq_type,
+	.irq_enable	= gpio_irq_enable,
+	.irq_disable	= gpio_irq_disable,
+	.irq_set_type	= gpio_irq_type,
 };
 
 static void
@@ -269,8 +269,8 @@
 		mask <<= 16;
 
 	/* temporarily mask (level sensitive) parent IRQ */
-	desc->chip->mask(irq);
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	while (1) {
 		u32		status;
 		int		n;
@@ -293,7 +293,7 @@
 			status >>= res;
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	/* now it may re-trigger */
 }
 
@@ -320,10 +320,10 @@
 		return -ENODEV;
 }
 
-static int gpio_irq_type_unbanked(unsigned irq, unsigned trigger)
+static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
 
 	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
 		return -EINVAL;
@@ -397,7 +397,7 @@
 		irq = bank_irq;
 		gpio_irqchip_unbanked = *get_irq_desc_chip(irq_to_desc(irq));
 		gpio_irqchip_unbanked.name = "GPIO-AINTC";
-		gpio_irqchip_unbanked.set_type = gpio_irq_type_unbanked;
+		gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked;
 
 		/* default trigger: both edges */
 		g = gpio2regs(0);
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index 784ddf3..5e05c9b 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -53,14 +53,14 @@
 }
 
 /* Disable interrupt */
-static void davinci_mask_irq(unsigned int irq)
+static void davinci_mask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 	u32 l;
 
-	mask = 1 << IRQ_BIT(irq);
+	mask = 1 << IRQ_BIT(d->irq);
 
-	if (irq > 31) {
+	if (d->irq > 31) {
 		l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET);
 		l &= ~mask;
 		davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET);
@@ -72,14 +72,14 @@
 }
 
 /* Enable interrupt */
-static void davinci_unmask_irq(unsigned int irq)
+static void davinci_unmask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 	u32 l;
 
-	mask = 1 << IRQ_BIT(irq);
+	mask = 1 << IRQ_BIT(d->irq);
 
-	if (irq > 31) {
+	if (d->irq > 31) {
 		l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET);
 		l |= mask;
 		davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET);
@@ -91,23 +91,23 @@
 }
 
 /* EOI interrupt */
-static void davinci_ack_irq(unsigned int irq)
+static void davinci_ack_irq(struct irq_data *d)
 {
 	unsigned int mask;
 
-	mask = 1 << IRQ_BIT(irq);
+	mask = 1 << IRQ_BIT(d->irq);
 
-	if (irq > 31)
+	if (d->irq > 31)
 		davinci_irq_writel(mask, IRQ_REG1_OFFSET);
 	else
 		davinci_irq_writel(mask, IRQ_REG0_OFFSET);
 }
 
 static struct irq_chip davinci_irq_chip_0 = {
-	.name	= "AINTC",
-	.ack	= davinci_ack_irq,
-	.mask	= davinci_mask_irq,
-	.unmask = davinci_unmask_irq,
+	.name		= "AINTC",
+	.irq_ack	= davinci_ack_irq,
+	.irq_mask	= davinci_mask_irq,
+	.irq_unmask	= davinci_unmask_irq,
 };
 
 /* ARM Interrupt Controller Initialization */
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index 61bfcb3..9317f05 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -36,9 +36,9 @@
 	}
 }
 
-static void pmu_irq_mask(unsigned int irq)
+static void pmu_irq_mask(struct irq_data *d)
 {
-	int pin = irq_to_pmu(irq);
+	int pin = irq_to_pmu(d->irq);
 	u32 u;
 
 	u = readl(PMU_INTERRUPT_MASK);
@@ -46,9 +46,9 @@
 	writel(u, PMU_INTERRUPT_MASK);
 }
 
-static void pmu_irq_unmask(unsigned int irq)
+static void pmu_irq_unmask(struct irq_data *d)
 {
-	int pin = irq_to_pmu(irq);
+	int pin = irq_to_pmu(d->irq);
 	u32 u;
 
 	u = readl(PMU_INTERRUPT_MASK);
@@ -56,9 +56,9 @@
 	writel(u, PMU_INTERRUPT_MASK);
 }
 
-static void pmu_irq_ack(unsigned int irq)
+static void pmu_irq_ack(struct irq_data *d)
 {
-	int pin = irq_to_pmu(irq);
+	int pin = irq_to_pmu(d->irq);
 	u32 u;
 
 	u = ~(1 << (pin & 31));
@@ -67,9 +67,9 @@
 
 static struct irq_chip pmu_irq_chip = {
 	.name		= "pmu_irq",
-	.mask		= pmu_irq_mask,
-	.unmask		= pmu_irq_unmask,
-	.ack		= pmu_irq_ack,
+	.irq_mask	= pmu_irq_mask,
+	.irq_unmask	= pmu_irq_unmask,
+	.irq_ack	= pmu_irq_ack,
 };
 
 static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 5df4099..7df083f 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -35,20 +35,20 @@
 #define IRQ_STAT		0xff000000	/* read */
 #define IRQ_MCLR		0xff000000	/* write */
 
-static void ebsa110_mask_irq(unsigned int irq)
+static void ebsa110_mask_irq(struct irq_data *d)
 {
-	__raw_writeb(1 << irq, IRQ_MCLR);
+	__raw_writeb(1 << d->irq, IRQ_MCLR);
 }
 
-static void ebsa110_unmask_irq(unsigned int irq)
+static void ebsa110_unmask_irq(struct irq_data *d)
 {
-	__raw_writeb(1 << irq, IRQ_MSET);
+	__raw_writeb(1 << d->irq, IRQ_MSET);
 }
 
 static struct irq_chip ebsa110_irq_chip = {
-	.ack	= ebsa110_mask_irq,
-	.mask	= ebsa110_mask_irq,
-	.unmask = ebsa110_unmask_irq,
+	.irq_ack	= ebsa110_mask_irq,
+	.irq_mask	= ebsa110_mask_irq,
+	.irq_unmask	= ebsa110_unmask_irq,
 };
  
 static void __init ebsa110_init_irq(void)
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
index cf547ad..f3dc76f 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/arch/arm/mach-ep93xx/gpio.c
@@ -112,13 +112,13 @@
 	generic_handle_irq(gpio_irq);
 }
 
-static void ep93xx_gpio_irq_ack(unsigned int irq)
+static void ep93xx_gpio_irq_ack(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 	int port_mask = 1 << (line & 7);
 
-	if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+	if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
 		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
 		ep93xx_gpio_update_int_params(port);
 	}
@@ -126,13 +126,13 @@
 	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
 }
 
-static void ep93xx_gpio_irq_mask_ack(unsigned int irq)
+static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 	int port_mask = 1 << (line & 7);
 
-	if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+	if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
 		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
 
 	gpio_int_unmasked[port] &= ~port_mask;
@@ -141,18 +141,18 @@
 	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
 }
 
-static void ep93xx_gpio_irq_mask(unsigned int irq)
+static void ep93xx_gpio_irq_mask(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 
 	gpio_int_unmasked[port] &= ~(1 << (line & 7));
 	ep93xx_gpio_update_int_params(port);
 }
 
-static void ep93xx_gpio_irq_unmask(unsigned int irq)
+static void ep93xx_gpio_irq_unmask(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 
 	gpio_int_unmasked[port] |= 1 << (line & 7);
@@ -164,10 +164,10 @@
  * edge (1) triggered, while gpio_int_type2 controls whether it
  * triggers on low/falling (0) or high/rising (1).
  */
-static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
+static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	const int gpio = irq_to_gpio(irq);
+	struct irq_desc *desc = irq_desc + d->irq;
+	const int gpio = irq_to_gpio(d->irq);
 	const int port = gpio >> 3;
 	const int port_mask = 1 << (gpio & 7);
 
@@ -220,11 +220,11 @@
 
 static struct irq_chip ep93xx_gpio_irq_chip = {
 	.name		= "GPIO",
-	.ack		= ep93xx_gpio_irq_ack,
-	.mask_ack	= ep93xx_gpio_irq_mask_ack,
-	.mask		= ep93xx_gpio_irq_mask,
-	.unmask		= ep93xx_gpio_irq_unmask,
-	.set_type	= ep93xx_gpio_irq_type,
+	.irq_ack	= ep93xx_gpio_irq_ack,
+	.irq_mask_ack	= ep93xx_gpio_irq_mask_ack,
+	.irq_mask	= ep93xx_gpio_irq_mask,
+	.irq_unmask	= ep93xx_gpio_irq_unmask,
+	.irq_set_type	= ep93xx_gpio_irq_type,
 };
 
 void __init ep93xx_gpio_init_irq(void)
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 88b3dd8..84c5f25 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -75,20 +75,20 @@
 	IRQ_MASK_PCI_PERR,	/* 19 */
 };
 
-static void fb_mask_irq(unsigned int irq)
+static void fb_mask_irq(struct irq_data *d)
 {
-	*CSR_IRQ_DISABLE = fb_irq_mask[_DC21285_INR(irq)];
+	*CSR_IRQ_DISABLE = fb_irq_mask[_DC21285_INR(d->irq)];
 }
 
-static void fb_unmask_irq(unsigned int irq)
+static void fb_unmask_irq(struct irq_data *d)
 {
-	*CSR_IRQ_ENABLE = fb_irq_mask[_DC21285_INR(irq)];
+	*CSR_IRQ_ENABLE = fb_irq_mask[_DC21285_INR(d->irq)];
 }
 
 static struct irq_chip fb_chip = {
-	.ack	= fb_mask_irq,
-	.mask	= fb_mask_irq,
-	.unmask = fb_unmask_irq,
+	.irq_ack	= fb_mask_irq,
+	.irq_mask	= fb_mask_irq,
+	.irq_unmask	= fb_unmask_irq,
 };
 
 static void __init __fb_init_irq(void)
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index 8bfd06a..de7a5cb 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -30,61 +30,61 @@
 
 #include "common.h"
 
-static void isa_mask_pic_lo_irq(unsigned int irq)
+static void isa_mask_pic_lo_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO);
 }
 
-static void isa_ack_pic_lo_irq(unsigned int irq)
+static void isa_ack_pic_lo_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO);
 	outb(0x20, PIC_LO);
 }
 
-static void isa_unmask_pic_lo_irq(unsigned int irq)
+static void isa_unmask_pic_lo_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_LO) & ~mask, PIC_MASK_LO);
 }
 
 static struct irq_chip isa_lo_chip = {
-	.ack	= isa_ack_pic_lo_irq,
-	.mask	= isa_mask_pic_lo_irq,
-	.unmask = isa_unmask_pic_lo_irq,
+	.irq_ack	= isa_ack_pic_lo_irq,
+	.irq_mask	= isa_mask_pic_lo_irq,
+	.irq_unmask	= isa_unmask_pic_lo_irq,
 };
 
-static void isa_mask_pic_hi_irq(unsigned int irq)
+static void isa_mask_pic_hi_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI);
 }
 
-static void isa_ack_pic_hi_irq(unsigned int irq)
+static void isa_ack_pic_hi_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI);
 	outb(0x62, PIC_LO);
 	outb(0x20, PIC_HI);
 }
 
-static void isa_unmask_pic_hi_irq(unsigned int irq)
+static void isa_unmask_pic_hi_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_HI) & ~mask, PIC_MASK_HI);
 }
 
 static struct irq_chip isa_hi_chip = {
-	.ack	= isa_ack_pic_hi_irq,
-	.mask	= isa_mask_pic_hi_irq,
-	.unmask = isa_unmask_pic_hi_irq,
+	.irq_ack	= isa_ack_pic_hi_irq,
+	.irq_mask	= isa_mask_pic_hi_irq,
+	.irq_unmask	= isa_unmask_pic_hi_irq,
 };
 
 static void
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index fe3bd5a..fa3d333 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -54,33 +54,33 @@
 	__raw_writel(reg, base + GPIO_INT_EN);
 }
 
-static void gpio_ack_irq(unsigned int irq)
+static void gpio_ack_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int base = GPIO_BASE(gpio / 32);
 
 	__raw_writel(1 << (gpio % 32), base + GPIO_INT_CLR);
 }
 
-static void gpio_mask_irq(unsigned int irq)
+static void gpio_mask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int base = GPIO_BASE(gpio / 32);
 
 	_set_gpio_irqenable(base, gpio % 32, 0);
 }
 
-static void gpio_unmask_irq(unsigned int irq)
+static void gpio_unmask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int base = GPIO_BASE(gpio / 32);
 
 	_set_gpio_irqenable(base, gpio % 32, 1);
 }
 
-static int gpio_set_irq_type(unsigned int irq, unsigned int type)
+static int gpio_set_irq_type(struct irq_data *d, unsigned int type)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int gpio_mask = 1 << (gpio % 32);
 	unsigned int base = GPIO_BASE(gpio / 32);
 	unsigned int reg_both, reg_level, reg_type;
@@ -120,7 +120,7 @@
 	__raw_writel(reg_level, base + GPIO_INT_LEVEL);
 	__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
-	gpio_ack_irq(irq);
+	gpio_ack_irq(d);
 
 	return 0;
 }
@@ -146,10 +146,10 @@
 
 static struct irq_chip gpio_irq_chip = {
 	.name = "GPIO",
-	.ack = gpio_ack_irq,
-	.mask = gpio_mask_irq,
-	.unmask = gpio_unmask_irq,
-	.set_type = gpio_set_irq_type,
+	.irq_ack = gpio_ack_irq,
+	.irq_mask = gpio_mask_irq,
+	.irq_unmask = gpio_unmask_irq,
+	.irq_set_type = gpio_set_irq_type,
 };
 
 static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
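One subtlety of the conversion: once a helper such as an ack routine takes struct irq_data *, every internal caller must hand the pointer through rather than the raw number — gpio_set_irq_type() above now ends with gpio_ack_irq(d), not gpio_ack_irq(d->irq). A sketch of that pattern, with hypothetical demo_* names and base values:

#include <linux/irq.h>
#include <linux/io.h>

#define DEMO_GPIO_IRQ_BASE	64				/* hypothetical */
#define DEMO_GPIO_INT_CLR	((void __iomem *)0xfe100008)	/* hypothetical */

static void demo_gpio_ack(struct irq_data *d)
{
	unsigned int gpio = d->irq - DEMO_GPIO_IRQ_BASE;

	__raw_writel(1 << (gpio % 32), DEMO_GPIO_INT_CLR);
}

static int demo_gpio_set_type(struct irq_data *d, unsigned int type)
{
	/* ...program the edge/level registers for d->irq here... */

	demo_gpio_ack(d);	/* hand the irq_data through, not d->irq */
	return 0;
}
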
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index 9e613ca..96bc227 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -32,34 +32,34 @@
 #define FIQ_LEVEL(base_addr)	(base_addr + 0x30)
 #define FIQ_STATUS(base_addr)	(base_addr + 0x34)
 
-static void gemini_ack_irq(unsigned int irq)
+static void gemini_ack_irq(struct irq_data *d)
 {
-	__raw_writel(1 << irq, IRQ_CLEAR(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
+	__raw_writel(1 << d->irq, IRQ_CLEAR(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
 }
 
-static void gemini_mask_irq(unsigned int irq)
+static void gemini_mask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 
 	mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-	mask &= ~(1 << irq);
+	mask &= ~(1 << d->irq);
 	__raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
 }
 
-static void gemini_unmask_irq(unsigned int irq)
+static void gemini_unmask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 
 	mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-	mask |= (1 << irq);
+	mask |= (1 << d->irq);
 	__raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
 }
 
 static struct irq_chip gemini_irq_chip = {
-	.name	= "INTC",
-	.ack	= gemini_ack_irq,
-	.mask	= gemini_mask_irq,
-	.unmask	= gemini_unmask_irq,
+	.name		= "INTC",
+	.irq_ack	= gemini_ack_irq,
+	.irq_mask	= gemini_mask_irq,
+	.irq_unmask	= gemini_unmask_irq,
 };
 
 static struct resource irq_resource = {
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c
index bdb3f67..1f28c90 100644
--- a/arch/arm/mach-h720x/common.c
+++ b/arch/arm/mach-h720x/common.c
@@ -52,17 +52,17 @@
 /*
  * mask Global irq's
  */
-static void mask_global_irq (unsigned int irq )
+static void mask_global_irq(struct irq_data *d)
 {
-	CPU_REG (IRQC_VIRT, IRQC_IER) &= ~(1 << irq);
+	CPU_REG (IRQC_VIRT, IRQC_IER) &= ~(1 << d->irq);
 }
 
 /*
  * unmask Global irq's
  */
-static void unmask_global_irq (unsigned int irq )
+static void unmask_global_irq(struct irq_data *d)
 {
-	CPU_REG (IRQC_VIRT, IRQC_IER) |= (1 << irq);
+	CPU_REG (IRQC_VIRT, IRQC_IER) |= (1 << d->irq);
 }
 
 
@@ -70,10 +70,10 @@
  * ack GPIO irq's
  * Ack only for edge triggered int's valid
  */
-static void inline ack_gpio_irq(u32 irq)
+static inline void ack_gpio_irq(struct irq_data *d)
 {
-	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(irq));
-	u32 bit = IRQ_TO_BIT(irq);
+	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
+	u32 bit = IRQ_TO_BIT(d->irq);
 	if ( (CPU_REG (reg_base, GPIO_EDGE) & bit))
 		CPU_REG (reg_base, GPIO_CLR) = bit;
 }
@@ -81,20 +81,20 @@
 /*
  * mask GPIO irq's
  */
-static void inline mask_gpio_irq(u32 irq)
+static inline void mask_gpio_irq(struct irq_data *d)
 {
-	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(irq));
-	u32 bit = IRQ_TO_BIT(irq);
+	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
+	u32 bit = IRQ_TO_BIT(d->irq);
 	CPU_REG (reg_base, GPIO_MASK) &= ~bit;
 }
 
 /*
  * unmask GPIO irq's
  */
-static void inline unmask_gpio_irq(u32 irq)
+static inline void unmask_gpio_irq(struct irq_data *d)
 {
-	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(irq));
-	u32 bit = IRQ_TO_BIT(irq);
+	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
+	u32 bit = IRQ_TO_BIT(d->irq);
 	CPU_REG (reg_base, GPIO_MASK) |= bit;
 }
 
@@ -170,15 +170,15 @@
 #endif
 
 static struct irq_chip h720x_global_chip = {
-	.ack = mask_global_irq,
-	.mask = mask_global_irq,
-	.unmask = unmask_global_irq,
+	.irq_ack = mask_global_irq,
+	.irq_mask = mask_global_irq,
+	.irq_unmask = unmask_global_irq,
 };
 
 static struct irq_chip h720x_gpio_chip = {
-	.ack = ack_gpio_irq,
-	.mask = mask_gpio_irq,
-	.unmask = unmask_gpio_irq,
+	.irq_ack = ack_gpio_irq,
+	.irq_mask = mask_gpio_irq,
+	.irq_unmask = unmask_gpio_irq,
 };
 
 /*
diff --git a/arch/arm/mach-h720x/cpu-h7202.c b/arch/arm/mach-h720x/cpu-h7202.c
index fd33a19..ac3f914 100644
--- a/arch/arm/mach-h720x/cpu-h7202.c
+++ b/arch/arm/mach-h720x/cpu-h7202.c
@@ -141,27 +141,27 @@
 /*
  * mask multiplexed timer IRQs
  */
-static void inline mask_timerx_irq (u32 irq)
+static inline void mask_timerx_irq(struct irq_data *d)
 {
 	unsigned int bit;
-	bit = 2 << ((irq == IRQ_TIMER64B) ? 4 : (irq - IRQ_TIMER1));
+	bit = 2 << ((d->irq == IRQ_TIMER64B) ? 4 : (d->irq - IRQ_TIMER1));
 	CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) &= ~bit;
 }
 
 /*
  * unmask multiplexed timer IRQs
  */
-static void inline unmask_timerx_irq (u32 irq)
+static inline void unmask_timerx_irq(struct irq_data *d)
 {
 	unsigned int bit;
-	bit = 2 << ((irq == IRQ_TIMER64B) ? 4 : (irq - IRQ_TIMER1));
+	bit = 2 << ((d->irq == IRQ_TIMER64B) ? 4 : (d->irq - IRQ_TIMER1));
 	CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) |= bit;
 }
 
 static struct irq_chip h7202_timerx_chip = {
-	.ack = mask_timerx_irq,
-	.mask = mask_timerx_irq,
-	.unmask = unmask_timerx_irq,
+	.irq_ack = mask_timerx_irq,
+	.irq_mask = mask_timerx_irq,
+	.irq_unmask = unmask_timerx_irq,
 };
 
 static struct irqaction h7202_timer_irq = {
diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c
index 79f0b89..629454d 100644
--- a/arch/arm/mach-h720x/h7201-eval.c
+++ b/arch/arm/mach-h720x/h7201-eval.c
@@ -23,7 +23,6 @@
 #include <asm/types.h>
 #include <asm/mach-types.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mach/arch.h>
 #include <mach/hardware.h>
 #include "common.h"
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index cc28b1e..e9f46b6 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -23,7 +23,6 @@
 #include <asm/types.h>
 #include <asm/mach-types.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mach/arch.h>
 #include <mach/irqs.h>
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 17d2e60..56684b5 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -243,6 +243,7 @@
 	select IMX_HAVE_PLATFORM_MXC_EHCI
 	select IMX_HAVE_PLATFORM_MXC_MMC
 	select IMX_HAVE_PLATFORM_SPI_IMX
+	select MXC_DEBUG_BOARD
 	select MXC_ULPI if USB_ULPI
 	help
 	  Include support for MX27PDK platform. This includes specific
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 6fd0f8f..1643315 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -37,12 +37,15 @@
 #include <mach/common.h>
 #include <mach/iomux-mx27.h>
 #include <mach/ulpi.h>
+#include <mach/irqs.h>
+#include <mach/3ds_debugboard.h>
 
 #include "devices-imx27.h"
 
 #define SD1_EN_GPIO (GPIO_PORTB + 25)
 #define OTG_PHY_RESET_GPIO (GPIO_PORTB + 23)
 #define SPI2_SS0 (GPIO_PORTD + 21)
+#define EXPIO_PARENT_INT	(MXC_INTERNAL_IRQS + GPIO_PORTC + 28)
 
 static const int mx27pdk_pins[] __initconst = {
 	/* UART1 */
@@ -215,10 +218,10 @@
 
 static struct mc13783_regulator_init_data mx27_3ds_regulators[] = {
 	{
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &vmmc1_init,
 	}, {
-		.id = MC13783_REGU_VGEN,
+		.id = MC13783_REG_VGEN,
 		.init_data = &vgen_init,
 	},
 };
@@ -276,6 +279,9 @@
 	imx27_add_spi_imx1(&spi2_pdata);
 	spi_register_board_info(mx27_3ds_spi_devs,
 						ARRAY_SIZE(mx27_3ds_spi_devs));
+
+	if (mxc_expio_init(MX27_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+		pr_warn("Init of the debugboard failed, all devices on the debugboard are unusable.\n");
 }
 
 static void __init mx27pdk_timer_init(void)
diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c
index a3fbcb3a..fbb4577 100644
--- a/arch/arm/mach-integrator/cpu.c
+++ b/arch/arm/mach-integrator/cpu.c
@@ -173,7 +173,7 @@
 
 	if (machine_is_integrator()) {
 		vco.s = (cm_osc >> 8) & 7;
-	} else if (machine_is_cintegrator()) {
+	} else {
 		vco.s = 1;
 	}
 	vco.v = cm_osc & 255;
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 2774df8..b666443 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -156,21 +156,21 @@
 
 #define INTEGRATOR_SC_VALID_INT	0x003fffff
 
-static void sc_mask_irq(unsigned int irq)
+static void sc_mask_irq(struct irq_data *d)
 {
-	writel(1 << irq, VA_IC_BASE + IRQ_ENABLE_CLEAR);
+	writel(1 << d->irq, VA_IC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void sc_unmask_irq(unsigned int irq)
+static void sc_unmask_irq(struct irq_data *d)
 {
-	writel(1 << irq, VA_IC_BASE + IRQ_ENABLE_SET);
+	writel(1 << d->irq, VA_IC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip sc_chip = {
-	.name	= "SC",
-	.ack	= sc_mask_irq,
-	.mask	= sc_mask_irq,
-	.unmask = sc_unmask_irq,
+	.name		= "SC",
+	.irq_ack	= sc_mask_irq,
+	.irq_mask	= sc_mask_irq,
+	.irq_unmask	= sc_unmask_irq,
 };
 
 static void __init ap_init_irq(void)
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 85e48a5..e9327da 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -146,61 +146,61 @@
 #define sic_writel	__raw_writel
 #define sic_readl	__raw_readl
 
-static void cic_mask_irq(unsigned int irq)
+static void cic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_CIC_START;
+	unsigned int irq = d->irq - IRQ_CIC_START;
 	cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void cic_unmask_irq(unsigned int irq)
+static void cic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_CIC_START;
+	unsigned int irq = d->irq - IRQ_CIC_START;
 	cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip cic_chip = {
-	.name	= "CIC",
-	.ack	= cic_mask_irq,
-	.mask	= cic_mask_irq,
-	.unmask	= cic_unmask_irq,
+	.name		= "CIC",
+	.irq_ack	= cic_mask_irq,
+	.irq_mask	= cic_mask_irq,
+	.irq_unmask	= cic_unmask_irq,
 };
 
-static void pic_mask_irq(unsigned int irq)
+static void pic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_PIC_START;
+	unsigned int irq = d->irq - IRQ_PIC_START;
 	pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void pic_unmask_irq(unsigned int irq)
+static void pic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_PIC_START;
+	unsigned int irq = d->irq - IRQ_PIC_START;
 	pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip pic_chip = {
-	.name	= "PIC",
-	.ack	= pic_mask_irq,
-	.mask	= pic_mask_irq,
-	.unmask = pic_unmask_irq,
+	.name		= "PIC",
+	.irq_ack	= pic_mask_irq,
+	.irq_mask	= pic_mask_irq,
+	.irq_unmask	= pic_unmask_irq,
 };
 
-static void sic_mask_irq(unsigned int irq)
+static void sic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
 	sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void sic_unmask_irq(unsigned int irq)
+static void sic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
 	sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip sic_chip = {
-	.name	= "SIC",
-	.ack	= sic_mask_irq,
-	.mask	= sic_mask_irq,
-	.unmask	= sic_unmask_irq,
+	.name		= "SIC",
+	.irq_ack	= sic_mask_irq,
+	.irq_mask	= sic_mask_irq,
+	.irq_unmask	= sic_unmask_irq,
 };
 
 static void
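Because the callback argument is now a pointer, the old idiom of decrementing the irq parameter in place (irq -= IRQ_PIC_START) becomes a single read of d->irq into a local, as in the CIC/PIC/SIC handlers above. A reduced sketch under invented DEMO_* constants:

#include <linux/irq.h>
#include <linux/io.h>

#define DEMO_IRQ_START		32				/* hypothetical */
#define DEMO_VA_BASE		((void __iomem *)0xfe200000)	/* hypothetical */
#define DEMO_ENABLE_CLEAR	0x0c				/* hypothetical */

static void demo_mask_irq(struct irq_data *d)
{
	/* Read d->irq once instead of mutating a by-value parameter. */
	unsigned int offset = d->irq - DEMO_IRQ_START;

	writel(1 << offset, DEMO_VA_BASE + DEMO_ENABLE_CLEAR);
}
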
diff --git a/arch/arm/mach-iop13xx/irq.c b/arch/arm/mach-iop13xx/irq.c
index 0d099ca..a233470 100644
--- a/arch/arm/mach-iop13xx/irq.c
+++ b/arch/arm/mach-iop13xx/irq.c
@@ -123,79 +123,79 @@
 
 /* 0 = Interrupt Masked and 1 = Interrupt not masked */
 static void
-iop13xx_irq_mask0 (unsigned int irq)
+iop13xx_irq_mask0 (struct irq_data *d)
 {
-	write_intctl_0(read_intctl_0() & ~(1 << (irq - 0)));
+	write_intctl_0(read_intctl_0() & ~(1 << (d->irq - 0)));
 }
 
 static void
-iop13xx_irq_mask1 (unsigned int irq)
+iop13xx_irq_mask1 (struct irq_data *d)
 {
-	write_intctl_1(read_intctl_1() & ~(1 << (irq - 32)));
+	write_intctl_1(read_intctl_1() & ~(1 << (d->irq - 32)));
 }
 
 static void
-iop13xx_irq_mask2 (unsigned int irq)
+iop13xx_irq_mask2 (struct irq_data *d)
 {
-	write_intctl_2(read_intctl_2() & ~(1 << (irq - 64)));
+	write_intctl_2(read_intctl_2() & ~(1 << (d->irq - 64)));
 }
 
 static void
-iop13xx_irq_mask3 (unsigned int irq)
+iop13xx_irq_mask3 (struct irq_data *d)
 {
-	write_intctl_3(read_intctl_3() & ~(1 << (irq - 96)));
+	write_intctl_3(read_intctl_3() & ~(1 << (d->irq - 96)));
 }
 
 static void
-iop13xx_irq_unmask0(unsigned int irq)
+iop13xx_irq_unmask0(struct irq_data *d)
 {
-	write_intctl_0(read_intctl_0() | (1 << (irq - 0)));
+	write_intctl_0(read_intctl_0() | (1 << (d->irq - 0)));
 }
 
 static void
-iop13xx_irq_unmask1(unsigned int irq)
+iop13xx_irq_unmask1(struct irq_data *d)
 {
-	write_intctl_1(read_intctl_1() | (1 << (irq - 32)));
+	write_intctl_1(read_intctl_1() | (1 << (d->irq - 32)));
 }
 
 static void
-iop13xx_irq_unmask2(unsigned int irq)
+iop13xx_irq_unmask2(struct irq_data *d)
 {
-	write_intctl_2(read_intctl_2() | (1 << (irq - 64)));
+	write_intctl_2(read_intctl_2() | (1 << (d->irq - 64)));
 }
 
 static void
-iop13xx_irq_unmask3(unsigned int irq)
+iop13xx_irq_unmask3(struct irq_data *d)
 {
-	write_intctl_3(read_intctl_3() | (1 << (irq - 96)));
+	write_intctl_3(read_intctl_3() | (1 << (d->irq - 96)));
 }
 
 static struct irq_chip iop13xx_irqchip1 = {
-	.name	= "IOP13xx-1",
-	.ack    = iop13xx_irq_mask0,
-	.mask   = iop13xx_irq_mask0,
-	.unmask = iop13xx_irq_unmask0,
+	.name       = "IOP13xx-1",
+	.irq_ack    = iop13xx_irq_mask0,
+	.irq_mask   = iop13xx_irq_mask0,
+	.irq_unmask = iop13xx_irq_unmask0,
 };
 
 static struct irq_chip iop13xx_irqchip2 = {
-	.name	= "IOP13xx-2",
-	.ack    = iop13xx_irq_mask1,
-	.mask   = iop13xx_irq_mask1,
-	.unmask = iop13xx_irq_unmask1,
+	.name       = "IOP13xx-2",
+	.irq_ack    = iop13xx_irq_mask1,
+	.irq_mask   = iop13xx_irq_mask1,
+	.irq_unmask = iop13xx_irq_unmask1,
 };
 
 static struct irq_chip iop13xx_irqchip3 = {
-	.name	= "IOP13xx-3",
-	.ack    = iop13xx_irq_mask2,
-	.mask   = iop13xx_irq_mask2,
-	.unmask = iop13xx_irq_unmask2,
+	.name       = "IOP13xx-3",
+	.irq_ack    = iop13xx_irq_mask2,
+	.irq_mask   = iop13xx_irq_mask2,
+	.irq_unmask = iop13xx_irq_unmask2,
 };
 
 static struct irq_chip iop13xx_irqchip4 = {
-	.name	= "IOP13xx-4",
-	.ack    = iop13xx_irq_mask3,
-	.mask   = iop13xx_irq_mask3,
-	.unmask = iop13xx_irq_unmask3,
+	.name       = "IOP13xx-4",
+	.irq_ack    = iop13xx_irq_mask3,
+	.irq_mask   = iop13xx_irq_mask3,
+	.irq_unmask = iop13xx_irq_unmask3,
 };
 
 extern void iop_init_cp6_handler(void);
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index 7149fcc..c9c02e3 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -156,14 +156,14 @@
 	destroy_irq(irq);
 }
 
-static void iop13xx_msi_nop(unsigned int irq)
+static void iop13xx_msi_nop(struct irq_data *d)
 {
 	return;
 }
 
 static struct irq_chip iop13xx_msi_chip = {
 	.name = "PCI-MSI",
-	.ack = iop13xx_msi_nop,
+	.irq_ack = iop13xx_msi_nop,
 	.irq_enable = unmask_msi_irq,
 	.irq_disable = mask_msi_irq,
 	.irq_mask = mask_msi_irq,
diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c
index ba59b2d..d3426a1 100644
--- a/arch/arm/mach-iop32x/irq.c
+++ b/arch/arm/mach-iop32x/irq.c
@@ -32,24 +32,24 @@
 }
 
 static void
-iop32x_irq_mask(unsigned int irq)
+iop32x_irq_mask(struct irq_data *d)
 {
-	iop32x_mask &= ~(1 << irq);
+	iop32x_mask &= ~(1 << d->irq);
 	intctl_write(iop32x_mask);
 }
 
 static void
-iop32x_irq_unmask(unsigned int irq)
+iop32x_irq_unmask(struct irq_data *d)
 {
-	iop32x_mask |= 1 << irq;
+	iop32x_mask |= 1 << d->irq;
 	intctl_write(iop32x_mask);
 }
 
 struct irq_chip ext_chip = {
-	.name	= "IOP32x",
-	.ack	= iop32x_irq_mask,
-	.mask	= iop32x_irq_mask,
-	.unmask	= iop32x_irq_unmask,
+	.name		= "IOP32x",
+	.irq_ack	= iop32x_irq_mask,
+	.irq_mask	= iop32x_irq_mask,
+	.irq_unmask	= iop32x_irq_unmask,
 };
 
 void __init iop32x_init_irq(void)
diff --git a/arch/arm/mach-iop33x/irq.c b/arch/arm/mach-iop33x/irq.c
index abb4ea2..0ff2f74 100644
--- a/arch/arm/mach-iop33x/irq.c
+++ b/arch/arm/mach-iop33x/irq.c
@@ -53,45 +53,45 @@
 }
 
 static void
-iop33x_irq_mask1 (unsigned int irq)
+iop33x_irq_mask1 (struct irq_data *d)
 {
-	iop33x_mask0 &= ~(1 << irq);
+	iop33x_mask0 &= ~(1 << d->irq);
 	intctl0_write(iop33x_mask0);
 }
 
 static void
-iop33x_irq_mask2 (unsigned int irq)
+iop33x_irq_mask2 (struct irq_data *d)
 {
-	iop33x_mask1 &= ~(1 << (irq - 32));
+	iop33x_mask1 &= ~(1 << (d->irq - 32));
 	intctl1_write(iop33x_mask1);
 }
 
 static void
-iop33x_irq_unmask1(unsigned int irq)
+iop33x_irq_unmask1(struct irq_data *d)
 {
-	iop33x_mask0 |= 1 << irq;
+	iop33x_mask0 |= 1 << d->irq;
 	intctl0_write(iop33x_mask0);
 }
 
 static void
-iop33x_irq_unmask2(unsigned int irq)
+iop33x_irq_unmask2(struct irq_data *d)
 {
-	iop33x_mask1 |= (1 << (irq - 32));
+	iop33x_mask1 |= (1 << (d->irq - 32));
 	intctl1_write(iop33x_mask1);
 }
 
 struct irq_chip iop33x_irqchip1 = {
-	.name	= "IOP33x-1",
-	.ack	= iop33x_irq_mask1,
-	.mask	= iop33x_irq_mask1,
-	.unmask	= iop33x_irq_unmask1,
+	.name		= "IOP33x-1",
+	.irq_ack	= iop33x_irq_mask1,
+	.irq_mask	= iop33x_irq_mask1,
+	.irq_unmask	= iop33x_irq_unmask1,
 };
 
 struct irq_chip iop33x_irqchip2 = {
-	.name	= "IOP33x-2",
-	.ack	= iop33x_irq_mask2,
-	.mask	= iop33x_irq_mask2,
-	.unmask	= iop33x_irq_unmask2,
+	.name		= "IOP33x-2",
+	.irq_ack	= iop33x_irq_mask2,
+	.irq_mask	= iop33x_irq_mask2,
+	.irq_unmask	= iop33x_irq_unmask2,
 };
 
 void __init iop33x_init_irq(void)
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index e24e3d0..5fc4e06 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -309,9 +309,9 @@
 	}
 }
 
-static int ixp2000_GPIO_irq_type(unsigned int irq, unsigned int type)
+static int ixp2000_GPIO_irq_type(struct irq_data *d, unsigned int type)
 {
-	int line = irq - IRQ_IXP2000_GPIO0;
+	int line = d->irq - IRQ_IXP2000_GPIO0;
 
 	/*
 	 * First, configure this GPIO line as an input.
@@ -342,8 +342,10 @@
 	return 0;
 }
 
-static void ixp2000_GPIO_irq_mask_ack(unsigned int irq)
+static void ixp2000_GPIO_irq_mask_ack(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	ixp2000_reg_write(IXP2000_GPIO_INCR, (1 << (irq - IRQ_IXP2000_GPIO0)));
 
 	ixp2000_reg_write(IXP2000_GPIO_EDSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
@@ -351,38 +353,42 @@
 	ixp2000_reg_wrb(IXP2000_GPIO_INST, (1 << (irq - IRQ_IXP2000_GPIO0)));
 }
 
-static void ixp2000_GPIO_irq_mask(unsigned int irq)
+static void ixp2000_GPIO_irq_mask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	ixp2000_reg_wrb(IXP2000_GPIO_INCR, (1 << (irq - IRQ_IXP2000_GPIO0)));
 }
 
-static void ixp2000_GPIO_irq_unmask(unsigned int irq)
+static void ixp2000_GPIO_irq_unmask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	ixp2000_reg_write(IXP2000_GPIO_INSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
 }
 
 static struct irq_chip ixp2000_GPIO_irq_chip = {
-	.ack		= ixp2000_GPIO_irq_mask_ack,
-	.mask		= ixp2000_GPIO_irq_mask,
-	.unmask		= ixp2000_GPIO_irq_unmask,
-	.set_type	= ixp2000_GPIO_irq_type,
+	.irq_ack	= ixp2000_GPIO_irq_mask_ack,
+	.irq_mask	= ixp2000_GPIO_irq_mask,
+	.irq_unmask	= ixp2000_GPIO_irq_unmask,
+	.irq_set_type	= ixp2000_GPIO_irq_type,
 };
 
-static void ixp2000_pci_irq_mask(unsigned int irq)
+static void ixp2000_pci_irq_mask(struct irq_data *d)
 {
 	unsigned long temp = *IXP2000_PCI_XSCALE_INT_ENABLE;
-	if (irq == IRQ_IXP2000_PCIA)
+	if (d->irq == IRQ_IXP2000_PCIA)
 		ixp2000_reg_wrb(IXP2000_PCI_XSCALE_INT_ENABLE, (temp & ~(1 << 26)));
-	else if (irq == IRQ_IXP2000_PCIB)
+	else if (d->irq == IRQ_IXP2000_PCIB)
 		ixp2000_reg_wrb(IXP2000_PCI_XSCALE_INT_ENABLE, (temp & ~(1 << 27)));
 }
 
-static void ixp2000_pci_irq_unmask(unsigned int irq)
+static void ixp2000_pci_irq_unmask(struct irq_data *d)
 {
 	unsigned long temp = *IXP2000_PCI_XSCALE_INT_ENABLE;
-	if (irq == IRQ_IXP2000_PCIA)
+	if (d->irq == IRQ_IXP2000_PCIA)
 		ixp2000_reg_write(IXP2000_PCI_XSCALE_INT_ENABLE, (temp | (1 << 26)));
-	else if (irq == IRQ_IXP2000_PCIB)
+	else if (d->irq == IRQ_IXP2000_PCIB)
 		ixp2000_reg_write(IXP2000_PCI_XSCALE_INT_ENABLE, (temp | (1 << 27)));
 }
 
@@ -401,44 +407,44 @@
 	}
 }
 
-static void ixp2000_err_irq_mask(unsigned int irq)
+static void ixp2000_err_irq_mask(struct irq_data *d)
 {
 	ixp2000_reg_write(IXP2000_IRQ_ERR_ENABLE_CLR,
-			(1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
+			(1 << (d->irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
 }
 
-static void ixp2000_err_irq_unmask(unsigned int irq)
+static void ixp2000_err_irq_unmask(struct irq_data *d)
 {
 	ixp2000_reg_write(IXP2000_IRQ_ERR_ENABLE_SET,
-			(1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
+			(1 << (d->irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
 }
 
 static struct irq_chip ixp2000_err_irq_chip = {
-	.ack	= ixp2000_err_irq_mask,
-	.mask	= ixp2000_err_irq_mask,
-	.unmask	= ixp2000_err_irq_unmask
+	.irq_ack	= ixp2000_err_irq_mask,
+	.irq_mask	= ixp2000_err_irq_mask,
+	.irq_unmask	= ixp2000_err_irq_unmask
 };
 
 static struct irq_chip ixp2000_pci_irq_chip = {
-	.ack	= ixp2000_pci_irq_mask,
-	.mask	= ixp2000_pci_irq_mask,
-	.unmask	= ixp2000_pci_irq_unmask
+	.irq_ack	= ixp2000_pci_irq_mask,
+	.irq_mask	= ixp2000_pci_irq_mask,
+	.irq_unmask	= ixp2000_pci_irq_unmask
 };
 
-static void ixp2000_irq_mask(unsigned int irq)
+static void ixp2000_irq_mask(struct irq_data *d)
 {
-	ixp2000_reg_wrb(IXP2000_IRQ_ENABLE_CLR, (1 << irq));
+	ixp2000_reg_wrb(IXP2000_IRQ_ENABLE_CLR, (1 << d->irq));
 }
 
-static void ixp2000_irq_unmask(unsigned int irq)
+static void ixp2000_irq_unmask(struct irq_data *d)
 {
-	ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << irq));
+	ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << d->irq));
 }
 
 static struct irq_chip ixp2000_irq_chip = {
-	.ack	= ixp2000_irq_mask,
-	.mask	= ixp2000_irq_mask,
-	.unmask	= ixp2000_irq_unmask
+	.irq_ack	= ixp2000_irq_mask,
+	.irq_mask	= ixp2000_irq_mask,
+	.irq_unmask	= ixp2000_irq_unmask
 };
 
 void __init ixp2000_init_irq(void)
diff --git a/arch/arm/mach-ixp2000/ixdp2x00.c b/arch/arm/mach-ixp2000/ixdp2x00.c
index 91fffb9..7d90d3f 100644
--- a/arch/arm/mach-ixp2000/ixdp2x00.c
+++ b/arch/arm/mach-ixp2000/ixdp2x00.c
@@ -63,7 +63,7 @@
 };
 #endif
 
-static void ixdp2x00_irq_mask(unsigned int irq)
+static void ixdp2x00_irq_mask(struct irq_data *d)
 {
 	unsigned long dummy;
 	static struct slowport_cfg old_cfg;
@@ -78,7 +78,7 @@
 #endif
 
 	dummy = *board_irq_mask;
-	dummy |=  IXP2000_BOARD_IRQ_MASK(irq);
+	dummy |=  IXP2000_BOARD_IRQ_MASK(d->irq);
 	ixp2000_reg_wrb(board_irq_mask, dummy);
 
 #ifdef CONFIG_ARCH_IXDP2400
@@ -87,7 +87,7 @@
 #endif
 }
 
-static void ixdp2x00_irq_unmask(unsigned int irq)
+static void ixdp2x00_irq_unmask(struct irq_data *d)
 {
 	unsigned long dummy;
 	static struct slowport_cfg old_cfg;
@@ -98,7 +98,7 @@
 #endif
 
 	dummy = *board_irq_mask;
-	dummy &=  ~IXP2000_BOARD_IRQ_MASK(irq);
+	dummy &=  ~IXP2000_BOARD_IRQ_MASK(d->irq);
 	ixp2000_reg_wrb(board_irq_mask, dummy);
 
 	if (machine_is_ixdp2400()) 
@@ -111,7 +111,7 @@
 	static struct slowport_cfg old_cfg;
 	int i;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 #ifdef CONFIG_ARCH_IXDP2400
 	if (machine_is_ixdp2400())
@@ -133,13 +133,13 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2x00_cpld_irq_chip = {
-	.ack	= ixdp2x00_irq_mask,
-	.mask	= ixdp2x00_irq_mask,
-	.unmask	= ixdp2x00_irq_unmask
+	.irq_ack	= ixdp2x00_irq_mask,
+	.irq_mask	= ixdp2x00_irq_mask,
+	.irq_unmask	= ixdp2x00_irq_unmask
 };
 
 void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_of_irqs)
diff --git a/arch/arm/mach-ixp2000/ixdp2x01.c b/arch/arm/mach-ixp2000/ixdp2x01.c
index 6c121bd..34b1b2a 100644
--- a/arch/arm/mach-ixp2000/ixdp2x01.c
+++ b/arch/arm/mach-ixp2000/ixdp2x01.c
@@ -48,16 +48,16 @@
 /*************************************************************************
  * IXDP2x01 IRQ Handling
  *************************************************************************/
-static void ixdp2x01_irq_mask(unsigned int irq)
+static void ixdp2x01_irq_mask(struct irq_data *d)
 {
 	ixp2000_reg_wrb(IXDP2X01_INT_MASK_SET_REG,
-				IXP2000_BOARD_IRQ_MASK(irq));
+				IXP2000_BOARD_IRQ_MASK(d->irq));
 }
 
-static void ixdp2x01_irq_unmask(unsigned int irq)
+static void ixdp2x01_irq_unmask(struct irq_data *d)
 {
 	ixp2000_reg_write(IXDP2X01_INT_MASK_CLR_REG,
-				IXP2000_BOARD_IRQ_MASK(irq));
+				IXP2000_BOARD_IRQ_MASK(d->irq));
 }
 
 static u32 valid_irq_mask;
@@ -67,7 +67,7 @@
 	u32 ex_interrupt;
 	int i;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	ex_interrupt = *IXDP2X01_INT_STAT_REG & valid_irq_mask;
 
@@ -83,13 +83,13 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2x01_irq_chip = {
-	.mask	= ixdp2x01_irq_mask,
-	.ack	= ixdp2x01_irq_mask,
-	.unmask	= ixdp2x01_irq_unmask
+	.irq_mask	= ixdp2x01_irq_mask,
+	.irq_ack	= ixdp2x01_irq_mask,
+	.irq_unmask	= ixdp2x01_irq_unmask
 };
 
 /*
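The chained demultiplex handlers move in the same direction: desc->chip->mask(irq) becomes desc->irq_data.chip->irq_mask(&desc->irq_data), i.e. the parent chip is now reached through the descriptor's embedded irq_data. A sketch of the whole demux shape, with hypothetical DEMO_* status register and IRQ numbering:

#include <linux/irq.h>
#include <linux/io.h>

#define DEMO_INT_STAT_REG	((void __iomem *)0xfe300000)	/* hypothetical */
#define DEMO_VALID_MASK		0xffU				/* hypothetical */
#define DEMO_NR_CPLD_IRQS	8				/* hypothetical */
#define DEMO_CPLD_IRQ_BASE	96				/* hypothetical */

static void demo_cpld_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;
	u32 pending;
	int i;

	/* Mask the level-triggered parent while its children run. */
	chip->irq_mask(&desc->irq_data);

	pending = __raw_readl(DEMO_INT_STAT_REG) & DEMO_VALID_MASK;
	for (i = 0; i < DEMO_NR_CPLD_IRQS; i++)
		if (pending & (1 << i))
			generic_handle_irq(DEMO_CPLD_IRQ_BASE + i);

	chip->irq_unmask(&desc->irq_data);
}
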
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index aa4c442..9c8a339 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -111,9 +111,9 @@
 
 static void ixp23xx_config_irq(unsigned int, enum ixp23xx_irq_type);
 
-static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)
+static int ixp23xx_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	int line = irq - IRQ_IXP23XX_GPIO6 + 6;
+	int line = d->irq - IRQ_IXP23XX_GPIO6 + 6;
 	u32 int_style;
 	enum ixp23xx_irq_type irq_type;
 	volatile u32 *int_reg;
@@ -149,7 +149,7 @@
 		return -EINVAL;
 	}
 
-	ixp23xx_config_irq(irq, irq_type);
+	ixp23xx_config_irq(d->irq, irq_type);
 
 	if (line >= 8) {	/* pins 8-15 */
 		line -= 8;
@@ -173,9 +173,10 @@
 	return 0;
 }
 
-static void ixp23xx_irq_mask(unsigned int irq)
+static void ixp23xx_irq_mask(struct irq_data *d)
 {
 	volatile unsigned long *intr_reg;
+	unsigned int irq = d->irq;
 
 	if (irq >= 56)
 		irq += 8;
@@ -184,9 +185,9 @@
 	*intr_reg &= ~(1 << (irq % 32));
 }
 
-static void ixp23xx_irq_ack(unsigned int irq)
+static void ixp23xx_irq_ack(struct irq_data *d)
 {
-	int line = irq - IRQ_IXP23XX_GPIO6 + 6;
+	int line = d->irq - IRQ_IXP23XX_GPIO6 + 6;
 
 	if ((line < 6) || (line > 15))
 		return;
@@ -198,11 +199,12 @@
  * Level triggered interrupts on GPIO lines can only be cleared when the
  * interrupt condition disappears.
  */
-static void ixp23xx_irq_level_unmask(unsigned int irq)
+static void ixp23xx_irq_level_unmask(struct irq_data *d)
 {
 	volatile unsigned long *intr_reg;
+	unsigned int irq = d->irq;
 
-	ixp23xx_irq_ack(irq);
+	ixp23xx_irq_ack(d);
 
 	if (irq >= 56)
 		irq += 8;
@@ -211,9 +213,10 @@
 	*intr_reg |= (1 << (irq % 32));
 }
 
-static void ixp23xx_irq_edge_unmask(unsigned int irq)
+static void ixp23xx_irq_edge_unmask(struct irq_data *d)
 {
 	volatile unsigned long *intr_reg;
+	unsigned int irq = d->irq;
 
 	if (irq >= 56)
 		irq += 8;
@@ -223,26 +226,30 @@
 }
 
 static struct irq_chip ixp23xx_irq_level_chip = {
-	.ack		= ixp23xx_irq_mask,
-	.mask		= ixp23xx_irq_mask,
-	.unmask		= ixp23xx_irq_level_unmask,
-	.set_type	= ixp23xx_irq_set_type
+	.irq_ack	= ixp23xx_irq_mask,
+	.irq_mask	= ixp23xx_irq_mask,
+	.irq_unmask	= ixp23xx_irq_level_unmask,
+	.irq_set_type	= ixp23xx_irq_set_type
 };
 
 static struct irq_chip ixp23xx_irq_edge_chip = {
-	.ack		= ixp23xx_irq_ack,
-	.mask		= ixp23xx_irq_mask,
-	.unmask		= ixp23xx_irq_edge_unmask,
-	.set_type	= ixp23xx_irq_set_type
+	.irq_ack	= ixp23xx_irq_ack,
+	.irq_mask	= ixp23xx_irq_mask,
+	.irq_unmask	= ixp23xx_irq_edge_unmask,
+	.irq_set_type	= ixp23xx_irq_set_type
 };
 
-static void ixp23xx_pci_irq_mask(unsigned int irq)
+static void ixp23xx_pci_irq_mask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	*IXP23XX_PCI_XSCALE_INT_ENABLE &= ~(1 << (IRQ_IXP23XX_INTA + 27 - irq));
 }
 
-static void ixp23xx_pci_irq_unmask(unsigned int irq)
+static void ixp23xx_pci_irq_unmask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	*IXP23XX_PCI_XSCALE_INT_ENABLE |= (1 << (IRQ_IXP23XX_INTA + 27 - irq));
 }
 
@@ -256,7 +263,7 @@
 
 	pci_interrupt = *IXP23XX_PCI_XSCALE_INT_STATUS;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	/* See which PCI_INTA, or PCI_INTB interrupted */
 	if (pci_interrupt & (1 << 26)) {
@@ -269,13 +276,13 @@
 
 	generic_handle_irq(irqno);
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixp23xx_pci_irq_chip = {
-	.ack	= ixp23xx_pci_irq_mask,
-	.mask	= ixp23xx_pci_irq_mask,
-	.unmask	= ixp23xx_pci_irq_unmask
+	.irq_ack	= ixp23xx_pci_irq_mask,
+	.irq_mask	= ixp23xx_pci_irq_mask,
+	.irq_unmask	= ixp23xx_pci_irq_unmask
 };
 
 static void ixp23xx_config_irq(unsigned int irq, enum ixp23xx_irq_type type)
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c
index 664e39c..181116a 100644
--- a/arch/arm/mach-ixp23xx/ixdp2351.c
+++ b/arch/arm/mach-ixp23xx/ixdp2351.c
@@ -48,14 +48,14 @@
 /*
  * IXDP2351 Interrupt Handling
  */
-static void ixdp2351_inta_mask(unsigned int irq)
+static void ixdp2351_inta_mask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTA_MASK_SET_REG = IXDP2351_INTA_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTA_MASK_SET_REG = IXDP2351_INTA_IRQ_MASK(d->irq);
 }
 
-static void ixdp2351_inta_unmask(unsigned int irq)
+static void ixdp2351_inta_unmask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTA_MASK_CLR_REG = IXDP2351_INTA_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTA_MASK_CLR_REG = IXDP2351_INTA_IRQ_MASK(d->irq);
 }
 
 static void ixdp2351_inta_handler(unsigned int irq, struct irq_desc *desc)
@@ -64,7 +64,7 @@
 		*IXDP2351_CPLD_INTA_STAT_REG & IXDP2351_INTA_IRQ_VALID;
 	int i;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	for (i = 0; i < IXDP2351_INTA_IRQ_NUM; i++) {
 		if (ex_interrupt & (1 << i)) {
@@ -74,23 +74,23 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2351_inta_chip = {
-	.ack	= ixdp2351_inta_mask,
-	.mask	= ixdp2351_inta_mask,
-	.unmask	= ixdp2351_inta_unmask
+	.irq_ack	= ixdp2351_inta_mask,
+	.irq_mask	= ixdp2351_inta_mask,
+	.irq_unmask	= ixdp2351_inta_unmask
 };
 
-static void ixdp2351_intb_mask(unsigned int irq)
+static void ixdp2351_intb_mask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTB_MASK_SET_REG = IXDP2351_INTB_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTB_MASK_SET_REG = IXDP2351_INTB_IRQ_MASK(d->irq);
 }
 
-static void ixdp2351_intb_unmask(unsigned int irq)
+static void ixdp2351_intb_unmask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTB_MASK_CLR_REG = IXDP2351_INTB_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTB_MASK_CLR_REG = IXDP2351_INTB_IRQ_MASK(d->irq);
 }
 
 static void ixdp2351_intb_handler(unsigned int irq, struct irq_desc *desc)
@@ -99,7 +99,7 @@
 		*IXDP2351_CPLD_INTB_STAT_REG & IXDP2351_INTB_IRQ_VALID;
 	int i;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	for (i = 0; i < IXDP2351_INTB_IRQ_NUM; i++) {
 		if (ex_interrupt & (1 << i)) {
@@ -109,13 +109,13 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2351_intb_chip = {
-	.ack	= ixdp2351_intb_mask,
-	.mask	= ixdp2351_intb_mask,
-	.unmask	= ixdp2351_intb_unmask
+	.irq_ack	= ixdp2351_intb_mask,
+	.irq_mask	= ixdp2351_intb_mask,
+	.irq_unmask	= ixdp2351_intb_unmask
 };
 
 void __init ixdp2351_init_irq(void)
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 4dbfcbb..4dc68d6 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -128,9 +128,9 @@
 }
 EXPORT_SYMBOL(irq_to_gpio);
 
-static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
+static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
 {
-	int line = irq2gpio[irq];
+	int line = irq2gpio[d->irq];
 	u32 int_style;
 	enum ixp4xx_irq_type irq_type;
 	volatile u32 *int_reg;
@@ -167,9 +167,9 @@
 	}
 
 	if (irq_type == IXP4XX_IRQ_EDGE)
-		ixp4xx_irq_edge |= (1 << irq);
+		ixp4xx_irq_edge |= (1 << d->irq);
 	else
-		ixp4xx_irq_edge &= ~(1 << irq);
+		ixp4xx_irq_edge &= ~(1 << d->irq);
 
 	if (line >= 8) {	/* pins 8-15 */
 		line -= 8;
@@ -188,22 +188,22 @@
 	*int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
 
 	/* Configure the line as an input */
-	gpio_line_config(irq2gpio[irq], IXP4XX_GPIO_IN);
+	gpio_line_config(irq2gpio[d->irq], IXP4XX_GPIO_IN);
 
 	return 0;
 }
 
-static void ixp4xx_irq_mask(unsigned int irq)
+static void ixp4xx_irq_mask(struct irq_data *d)
 {
-	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && irq >= 32)
-		*IXP4XX_ICMR2 &= ~(1 << (irq - 32));
+	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
+		*IXP4XX_ICMR2 &= ~(1 << (d->irq - 32));
 	else
-		*IXP4XX_ICMR &= ~(1 << irq);
+		*IXP4XX_ICMR &= ~(1 << d->irq);
 }
 
-static void ixp4xx_irq_ack(unsigned int irq)
+static void ixp4xx_irq_ack(struct irq_data *d)
 {
-	int line = (irq < 32) ? irq2gpio[irq] : -1;
+	int line = (d->irq < 32) ? irq2gpio[d->irq] : -1;
 
 	if (line >= 0)
 		*IXP4XX_GPIO_GPISR = (1 << line);
@@ -213,23 +213,23 @@
  * Level triggered interrupts on GPIO lines can only be cleared when the
  * interrupt condition disappears.
  */
-static void ixp4xx_irq_unmask(unsigned int irq)
+static void ixp4xx_irq_unmask(struct irq_data *d)
 {
-	if (!(ixp4xx_irq_edge & (1 << irq)))
-		ixp4xx_irq_ack(irq);
+	if (!(ixp4xx_irq_edge & (1 << d->irq)))
+		ixp4xx_irq_ack(d);
 
-	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && irq >= 32)
-		*IXP4XX_ICMR2 |= (1 << (irq - 32));
+	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
+		*IXP4XX_ICMR2 |= (1 << (d->irq - 32));
 	else
-		*IXP4XX_ICMR |= (1 << irq);
+		*IXP4XX_ICMR |= (1 << d->irq);
 }
 
 static struct irq_chip ixp4xx_irq_chip = {
 	.name		= "IXP4xx",
-	.ack		= ixp4xx_irq_ack,
-	.mask		= ixp4xx_irq_mask,
-	.unmask		= ixp4xx_irq_unmask,
-	.set_type	= ixp4xx_set_irq_type,
+	.irq_ack	= ixp4xx_irq_ack,
+	.irq_mask	= ixp4xx_irq_mask,
+	.irq_unmask	= ixp4xx_irq_unmask,
+	.irq_set_type	= ixp4xx_set_irq_type,
 };
 
 void __init ixp4xx_init_irq(void)
diff --git a/arch/arm/mach-ks8695/irq.c b/arch/arm/mach-ks8695/irq.c
index e375c1d..7998cca 100644
--- a/arch/arm/mach-ks8695/irq.c
+++ b/arch/arm/mach-ks8695/irq.c
@@ -34,29 +34,29 @@
 #include <mach/regs-irq.h>
 #include <mach/regs-gpio.h>
 
-static void ks8695_irq_mask(unsigned int irqno)
+static void ks8695_irq_mask(struct irq_data *d)
 {
 	unsigned long inten;
 
 	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
-	inten &= ~(1 << irqno);
+	inten &= ~(1 << d->irq);
 
 	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
 }
 
-static void ks8695_irq_unmask(unsigned int irqno)
+static void ks8695_irq_unmask(struct irq_data *d)
 {
 	unsigned long inten;
 
 	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
-	inten |= (1 << irqno);
+	inten |= (1 << d->irq);
 
 	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
 }
 
-static void ks8695_irq_ack(unsigned int irqno)
+static void ks8695_irq_ack(struct irq_data *d)
 {
-	__raw_writel((1 << irqno), KS8695_IRQ_VA + KS8695_INTST);
+	__raw_writel((1 << d->irq), KS8695_IRQ_VA + KS8695_INTST);
 }
 
 
@@ -64,7 +64,7 @@
 static struct irq_chip ks8695_irq_edge_chip;
 
 
-static int ks8695_irq_set_type(unsigned int irqno, unsigned int type)
+static int ks8695_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	unsigned long ctrl, mode;
 	unsigned short level_triggered = 0;
@@ -93,7 +93,7 @@
 			return -EINVAL;
 	}
 
-	switch (irqno) {
+	switch (d->irq) {
 		case KS8695_IRQ_EXTERN0:
 			ctrl &= ~IOPC_IOEINT0TM;
 			ctrl |= IOPC_IOEINT0_MODE(mode);
@@ -115,12 +115,12 @@
 	}
 
 	if (level_triggered) {
-		set_irq_chip(irqno, &ks8695_irq_level_chip);
-		set_irq_handler(irqno, handle_level_irq);
+		set_irq_chip(d->irq, &ks8695_irq_level_chip);
+		set_irq_handler(d->irq, handle_level_irq);
 	}
 	else {
-		set_irq_chip(irqno, &ks8695_irq_edge_chip);
-		set_irq_handler(irqno, handle_edge_irq);
+		set_irq_chip(d->irq, &ks8695_irq_edge_chip);
+		set_irq_handler(d->irq, handle_edge_irq);
 	}
 
 	__raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC);
@@ -128,17 +128,17 @@
 }
 
 static struct irq_chip ks8695_irq_level_chip = {
-	.ack		= ks8695_irq_mask,
-	.mask		= ks8695_irq_mask,
-	.unmask		= ks8695_irq_unmask,
-	.set_type	= ks8695_irq_set_type,
+	.irq_ack	= ks8695_irq_mask,
+	.irq_mask	= ks8695_irq_mask,
+	.irq_unmask	= ks8695_irq_unmask,
+	.irq_set_type	= ks8695_irq_set_type,
 };
 
 static struct irq_chip ks8695_irq_edge_chip = {
-	.ack		= ks8695_irq_ack,
-	.mask		= ks8695_irq_mask,
-	.unmask		= ks8695_irq_unmask,
-	.set_type	= ks8695_irq_set_type,
+	.irq_ack	= ks8695_irq_ack,
+	.irq_mask	= ks8695_irq_mask,
+	.irq_unmask	= ks8695_irq_unmask,
+	.irq_set_type	= ks8695_irq_set_type,
 };
 
 void __init ks8695_init_irq(void)
@@ -164,7 +164,8 @@
 
 			/* Edge-triggered interrupts */
 			default:
-				ks8695_irq_ack(irq);	/* clear pending bit */
+				/* clear pending bit */
+				ks8695_irq_ack(irq_get_irq_data(irq));
 				set_irq_chip(irq, &ks8695_irq_edge_chip);
 				set_irq_handler(irq, handle_edge_irq);
 		}
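ks8695_irq_set_type() above shows a chip that flips an IRQ between level and edge flows at run time; under the new scheme it keys off d->irq but still uses the set_irq_chip()/set_irq_handler() accessors of this era. A reduced sketch — the demo_* chips are assumed to be defined elsewhere:

#include <linux/irq.h>

static struct irq_chip demo_level_chip;	/* assumed defined elsewhere */
static struct irq_chip demo_edge_chip;	/* assumed defined elsewhere */

static int demo_set_type(struct irq_data *d, unsigned int type)
{
	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
	case IRQ_TYPE_LEVEL_LOW:
		set_irq_chip(d->irq, &demo_level_chip);
		set_irq_handler(d->irq, handle_level_irq);
		return 0;
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_EDGE_FALLING:
		set_irq_chip(d->irq, &demo_edge_chip);
		set_irq_handler(d->irq, handle_edge_irq);
		return 0;
	default:
		return -EINVAL;
	}
}
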
diff --git a/arch/arm/mach-lh7a40x/arch-kev7a400.c b/arch/arm/mach-lh7a40x/arch-kev7a400.c
index 9088c166..71129c3 100644
--- a/arch/arm/mach-lh7a40x/arch-kev7a400.c
+++ b/arch/arm/mach-lh7a40x/arch-kev7a400.c
@@ -46,28 +46,28 @@
 
 static u16 CPLD_IRQ_mask;	/* Mask for CPLD IRQs, 1 == unmasked */
 
-static void kev7a400_ack_cpld_irq (u32 irq)
+static void kev7a400_ack_cpld_irq(struct irq_data *d)
 {
-	CPLD_CL_INT = 1 << (irq - IRQ_KEV7A400_CPLD);
+	CPLD_CL_INT = 1 << (d->irq - IRQ_KEV7A400_CPLD);
 }
 
-static void kev7a400_mask_cpld_irq (u32 irq)
+static void kev7a400_mask_cpld_irq(struct irq_data *d)
 {
-	CPLD_IRQ_mask &= ~(1 << (irq - IRQ_KEV7A400_CPLD));
+	CPLD_IRQ_mask &= ~(1 << (d->irq - IRQ_KEV7A400_CPLD));
 	CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
 }
 
-static void kev7a400_unmask_cpld_irq (u32 irq)
+static void kev7a400_unmask_cpld_irq(struct irq_data *d)
 {
-	CPLD_IRQ_mask |= 1 << (irq - IRQ_KEV7A400_CPLD);
+	CPLD_IRQ_mask |= 1 << (d->irq - IRQ_KEV7A400_CPLD);
 	CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
 }
 
 static struct irq_chip kev7a400_cpld_chip = {
-	.name	= "CPLD",
-	.ack	= kev7a400_ack_cpld_irq,
-	.mask	= kev7a400_mask_cpld_irq,
-	.unmask	= kev7a400_unmask_cpld_irq,
+	.name		= "CPLD",
+	.irq_ack	= kev7a400_ack_cpld_irq,
+	.irq_mask	= kev7a400_mask_cpld_irq,
+	.irq_unmask	= kev7a400_unmask_cpld_irq,
 };
 
 
diff --git a/arch/arm/mach-lh7a40x/arch-lpd7a40x.c b/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
index 7315a56..e735546 100644
--- a/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
+++ b/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
@@ -159,7 +159,7 @@
 #endif
 }
 
-static void lh7a40x_ack_cpld_irq (u32 irq)
+static void lh7a40x_ack_cpld_irq(struct irq_data *d)
 {
 	/* CPLD doesn't have ack capability, but some devices may */
 
@@ -167,14 +167,14 @@
 	/* The touch control *must* mask the interrupt because the
 	 * interrupt bit is read by the driver to determine if the pen
 	 * is still down. */
-	if (irq == IRQ_TOUCH)
+	if (d->irq == IRQ_TOUCH)
 		CPLD_INTERRUPTS |= CPLD_INTMASK_TOUCH;
 #endif
 }
 
-static void lh7a40x_mask_cpld_irq (u32 irq)
+static void lh7a40x_mask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS |= CPLD_INTMASK_ETHERNET;
 		break;
@@ -186,9 +186,9 @@
 	}
 }
 
-static void lh7a40x_unmask_cpld_irq (u32 irq)
+static void lh7a40x_unmask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS &= ~CPLD_INTMASK_ETHERNET;
 		break;
@@ -201,17 +201,17 @@
 }
 
 static struct irq_chip lpd7a40x_cpld_chip = {
-	.name	= "CPLD",
-	.ack	= lh7a40x_ack_cpld_irq,
-	.mask	= lh7a40x_mask_cpld_irq,
-	.unmask	= lh7a40x_unmask_cpld_irq,
+	.name		= "CPLD",
+	.irq_ack	= lh7a40x_ack_cpld_irq,
+	.irq_mask	= lh7a40x_mask_cpld_irq,
+	.irq_unmask	= lh7a40x_unmask_cpld_irq,
 };
 
 static void lpd7a40x_cpld_handler (unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int mask = CPLD_INTERRUPTS;
 
-	desc->chip->ack (irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	if ((mask & (1<<0)) == 0)	/* WLAN */
 		generic_handle_irq(IRQ_LPD7A40X_ETH_INT);
@@ -221,7 +221,8 @@
 		generic_handle_irq(IRQ_TOUCH);
 #endif
 
-	desc->chip->unmask (irq); /* Level-triggered need this */
+	/* Level-triggered need this */
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 
diff --git a/arch/arm/mach-lh7a40x/irq-lh7a400.c b/arch/arm/mach-lh7a40x/irq-lh7a400.c
index 1ad3afc..f2e7e65 100644
--- a/arch/arm/mach-lh7a40x/irq-lh7a400.c
+++ b/arch/arm/mach-lh7a40x/irq-lh7a400.c
@@ -21,34 +21,34 @@
 
   /* CPU IRQ handling */
 
-static void lh7a400_mask_irq (u32 irq)
+static void lh7a400_mask_irq(struct irq_data *d)
 {
-	INTC_INTENC = (1 << irq);
+	INTC_INTENC = (1 << d->irq);
 }
 
-static void lh7a400_unmask_irq (u32 irq)
+static void lh7a400_unmask_irq(struct irq_data *d)
 {
-	INTC_INTENS = (1 << irq);
+	INTC_INTENS = (1 << d->irq);
 }
 
-static void lh7a400_ack_gpio_irq (u32 irq)
+static void lh7a400_ack_gpio_irq(struct irq_data *d)
 {
-	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (irq));
-	INTC_INTENC = (1 << irq);
+	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
+	INTC_INTENC = (1 << d->irq);
 }
 
 static struct irq_chip lh7a400_internal_chip = {
-	.name	= "MPU",
-	.ack	= lh7a400_mask_irq, /* Level triggering -> mask is ack */
-	.mask	= lh7a400_mask_irq,
-	.unmask	= lh7a400_unmask_irq,
+	.name		= "MPU",
+	.irq_ack	= lh7a400_mask_irq, /* Level triggering -> mask is ack */
+	.irq_mask	= lh7a400_mask_irq,
+	.irq_unmask	= lh7a400_unmask_irq,
 };
 
 static struct irq_chip lh7a400_gpio_chip = {
-	.name	= "GPIO",
-	.ack	= lh7a400_ack_gpio_irq,
-	.mask	= lh7a400_mask_irq,
-	.unmask	= lh7a400_unmask_irq,
+	.name		= "GPIO",
+	.irq_ack	= lh7a400_ack_gpio_irq,
+	.irq_mask	= lh7a400_mask_irq,
+	.irq_unmask	= lh7a400_unmask_irq,
 };
 
 
diff --git a/arch/arm/mach-lh7a40x/irq-lh7a404.c b/arch/arm/mach-lh7a40x/irq-lh7a404.c
index 12b045b..14b1733 100644
--- a/arch/arm/mach-lh7a40x/irq-lh7a404.c
+++ b/arch/arm/mach-lh7a40x/irq-lh7a404.c
@@ -43,64 +43,64 @@
 
   /* CPU IRQ handling */
 
-static void lh7a404_vic1_mask_irq (u32 irq)
+static void lh7a404_vic1_mask_irq(struct irq_data *d)
 {
-	VIC1_INTENCLR = (1 << irq);
+	VIC1_INTENCLR = (1 << d->irq);
 }
 
-static void lh7a404_vic1_unmask_irq (u32 irq)
+static void lh7a404_vic1_unmask_irq(struct irq_data *d)
 {
-	VIC1_INTEN = (1 << irq);
+	VIC1_INTEN = (1 << d->irq);
 }
 
-static void lh7a404_vic2_mask_irq (u32 irq)
+static void lh7a404_vic2_mask_irq(struct irq_data *d)
 {
-	VIC2_INTENCLR = (1 << (irq - 32));
+	VIC2_INTENCLR = (1 << (d->irq - 32));
 }
 
-static void lh7a404_vic2_unmask_irq (u32 irq)
+static void lh7a404_vic2_unmask_irq(struct irq_data *d)
 {
-	VIC2_INTEN = (1 << (irq - 32));
+	VIC2_INTEN = (1 << (d->irq - 32));
 }
 
-static void lh7a404_vic1_ack_gpio_irq (u32 irq)
+static void lh7a404_vic1_ack_gpio_irq(struct irq_data *d)
 {
-	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (irq));
-	VIC1_INTENCLR = (1 << irq);
+	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
+	VIC1_INTENCLR = (1 << d->irq);
 }
 
-static void lh7a404_vic2_ack_gpio_irq (u32 irq)
+static void lh7a404_vic2_ack_gpio_irq(struct irq_data *d)
 {
-	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (irq));
-	VIC2_INTENCLR = (1 << irq);
+	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
+	VIC2_INTENCLR = (1 << d->irq);
 }
 
 static struct irq_chip lh7a404_vic1_chip = {
-	.name	= "VIC1",
-	.ack	= lh7a404_vic1_mask_irq, /* Because level-triggered */
-	.mask	= lh7a404_vic1_mask_irq,
-	.unmask	= lh7a404_vic1_unmask_irq,
+	.name		= "VIC1",
+	.irq_ack	= lh7a404_vic1_mask_irq, /* Because level-triggered */
+	.irq_mask	= lh7a404_vic1_mask_irq,
+	.irq_unmask	= lh7a404_vic1_unmask_irq,
 };
 
 static struct irq_chip lh7a404_vic2_chip = {
-	.name	= "VIC2",
-	.ack	= lh7a404_vic2_mask_irq, /* Because level-triggered */
-	.mask	= lh7a404_vic2_mask_irq,
-	.unmask	= lh7a404_vic2_unmask_irq,
+	.name		= "VIC2",
+	.irq_ack	= lh7a404_vic2_mask_irq, /* Because level-triggered */
+	.irq_mask	= lh7a404_vic2_mask_irq,
+	.irq_unmask	= lh7a404_vic2_unmask_irq,
 };
 
 static struct irq_chip lh7a404_gpio_vic1_chip = {
-	.name	= "GPIO-VIC1",
-	.ack	= lh7a404_vic1_ack_gpio_irq,
-	.mask	= lh7a404_vic1_mask_irq,
-	.unmask	= lh7a404_vic1_unmask_irq,
+	.name		= "GPIO-VIC1",
+	.irq_ack	= lh7a404_vic1_ack_gpio_irq,
+	.irq_mask	= lh7a404_vic1_mask_irq,
+	.irq_unmask	= lh7a404_vic1_unmask_irq,
 };
 
 static struct irq_chip lh7a404_gpio_vic2_chip = {
-	.name	= "GPIO-VIC2",
-	.ack	= lh7a404_vic2_ack_gpio_irq,
-	.mask	= lh7a404_vic2_mask_irq,
-	.unmask	= lh7a404_vic2_unmask_irq,
+	.name		= "GPIO-VIC2",
+	.irq_ack	= lh7a404_vic2_ack_gpio_irq,
+	.irq_mask	= lh7a404_vic2_mask_irq,
+	.irq_unmask	= lh7a404_vic2_unmask_irq,
 };
 
   /* IRQ initialization */
diff --git a/arch/arm/mach-lh7a40x/irq-lpd7a40x.c b/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
index fd033bb..1bfdcdd 100644
--- a/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
+++ b/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
@@ -20,14 +20,14 @@
 
 #include "common.h"
 
-static void lh7a40x_ack_cpld_irq (u32 irq)
+static void lh7a40x_ack_cpld_irq(struct irq_data *d)
 {
 	/* CPLD doesn't have ack capability */
 }
 
-static void lh7a40x_mask_cpld_irq (u32 irq)
+static void lh7a40x_mask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS = CPLD_INTERRUPTS | 0x4;
 		break;
@@ -37,9 +37,9 @@
 	}
 }
 
-static void lh7a40x_unmask_cpld_irq (u32 irq)
+static void lh7a40x_unmask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS = CPLD_INTERRUPTS & ~ 0x4;
 		break;
@@ -50,17 +50,17 @@
 }
 
 static struct irq_chip lh7a40x_cpld_chip = {
-	.name	= "CPLD",
-	.ack	= lh7a40x_ack_cpld_irq,
-	.mask	= lh7a40x_mask_cpld_irq,
-	.unmask	= lh7a40x_unmask_cpld_irq,
+	.name		= "CPLD",
+	.irq_ack	= lh7a40x_ack_cpld_irq,
+	.irq_mask	= lh7a40x_mask_cpld_irq,
+	.irq_unmask	= lh7a40x_unmask_cpld_irq,
 };
 
 static void lh7a40x_cpld_handler (unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int mask = CPLD_INTERRUPTS;
 
-	desc->chip->ack (irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	if ((mask & 0x1) == 0)	/* WLAN */
 		generic_handle_irq(IRQ_LPD7A40X_ETH_INT);
@@ -68,7 +68,7 @@
 	if ((mask & 0x2) == 0)	/* Touch */
 		generic_handle_irq(IRQ_LPD7A400_TS);
 
-	desc->chip->unmask (irq); /* Level-triggered need this */
+	desc->irq_data.chip->irq_unmask(&desc->irq_data); /* Level-triggered need this */
 }
 
 
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c
index bd0df26c..316ecbf 100644
--- a/arch/arm/mach-lpc32xx/irq.c
+++ b/arch/arm/mach-lpc32xx/irq.c
@@ -191,38 +191,38 @@
 	}
 }
 
-static void lpc32xx_mask_irq(unsigned int irq)
+static void lpc32xx_mask_irq(struct irq_data *d)
 {
 	unsigned int reg, ctrl, mask;
 
-	get_controller(irq, &ctrl, &mask);
+	get_controller(d->irq, &ctrl, &mask);
 
 	reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) & ~mask;
 	__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
 }
 
-static void lpc32xx_unmask_irq(unsigned int irq)
+static void lpc32xx_unmask_irq(struct irq_data *d)
 {
 	unsigned int reg, ctrl, mask;
 
-	get_controller(irq, &ctrl, &mask);
+	get_controller(d->irq, &ctrl, &mask);
 
 	reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) | mask;
 	__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
 }
 
-static void lpc32xx_ack_irq(unsigned int irq)
+static void lpc32xx_ack_irq(struct irq_data *d)
 {
 	unsigned int ctrl, mask;
 
-	get_controller(irq, &ctrl, &mask);
+	get_controller(d->irq, &ctrl, &mask);
 
 	__raw_writel(mask, LPC32XX_INTC_RAW_STAT(ctrl));
 
 	/* Also need to clear pending wake event */
-	if (lpc32xx_events[irq].mask != 0)
-		__raw_writel(lpc32xx_events[irq].mask,
-			lpc32xx_events[irq].event_group->rawstat_reg);
+	if (lpc32xx_events[d->irq].mask != 0)
+		__raw_writel(lpc32xx_events[d->irq].mask,
+			lpc32xx_events[d->irq].event_group->rawstat_reg);
 }
 
 static void __lpc32xx_set_irq_type(unsigned int irq, int use_high_level,
@@ -261,27 +261,27 @@
 	}
 }
 
-static int lpc32xx_set_irq_type(unsigned int irq, unsigned int type)
+static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type)
 {
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
 		/* Rising edge sensitive */
-		__lpc32xx_set_irq_type(irq, 1, 1);
+		__lpc32xx_set_irq_type(d->irq, 1, 1);
 		break;
 
 	case IRQ_TYPE_EDGE_FALLING:
 		/* Falling edge sensitive */
-		__lpc32xx_set_irq_type(irq, 0, 1);
+		__lpc32xx_set_irq_type(d->irq, 0, 1);
 		break;
 
 	case IRQ_TYPE_LEVEL_LOW:
 		/* Low level sensitive */
-		__lpc32xx_set_irq_type(irq, 0, 0);
+		__lpc32xx_set_irq_type(d->irq, 0, 0);
 		break;
 
 	case IRQ_TYPE_LEVEL_HIGH:
 		/* High level sensitive */
-		__lpc32xx_set_irq_type(irq, 1, 0);
+		__lpc32xx_set_irq_type(d->irq, 1, 0);
 		break;
 
 	/* Other modes are not supported */
@@ -290,33 +290,33 @@
 	}
 
 	/* Ok to use the level handler for all types */
-	set_irq_handler(irq, handle_level_irq);
+	set_irq_handler(d->irq, handle_level_irq);
 
 	return 0;
 }
 
-static int lpc32xx_irq_wake(unsigned int irqno, unsigned int state)
+static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state)
 {
 	unsigned long eventreg;
 
-	if (lpc32xx_events[irqno].mask != 0) {
-		eventreg = __raw_readl(lpc32xx_events[irqno].
+	if (lpc32xx_events[d->irq].mask != 0) {
+		eventreg = __raw_readl(lpc32xx_events[d->irq].
 			event_group->enab_reg);
 
 		if (state)
-			eventreg |= lpc32xx_events[irqno].mask;
+			eventreg |= lpc32xx_events[d->irq].mask;
 		else
-			eventreg &= ~lpc32xx_events[irqno].mask;
+			eventreg &= ~lpc32xx_events[d->irq].mask;
 
 		__raw_writel(eventreg,
-			lpc32xx_events[irqno].event_group->enab_reg);
+			lpc32xx_events[d->irq].event_group->enab_reg);
 
 		return 0;
 	}
 
 	/* Clear event */
-	__raw_writel(lpc32xx_events[irqno].mask,
-		lpc32xx_events[irqno].event_group->rawstat_reg);
+	__raw_writel(lpc32xx_events[d->irq].mask,
+		lpc32xx_events[d->irq].event_group->rawstat_reg);
 
 	return -ENODEV;
 }
@@ -336,11 +336,11 @@
 }
 
 static struct irq_chip lpc32xx_irq_chip = {
-	.ack = lpc32xx_ack_irq,
-	.mask = lpc32xx_mask_irq,
-	.unmask = lpc32xx_unmask_irq,
-	.set_type = lpc32xx_set_irq_type,
-	.set_wake = lpc32xx_irq_wake
+	.irq_ack = lpc32xx_ack_irq,
+	.irq_mask = lpc32xx_mask_irq,
+	.irq_unmask = lpc32xx_unmask_irq,
+	.irq_set_type = lpc32xx_set_irq_type,
+	.irq_set_wake = lpc32xx_irq_wake
 };
 
 static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
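The lpc32xx conversion also renames .set_wake to .irq_set_wake; the contract is unchanged — return 0 once the wake source is armed or disarmed, or a negative errno when the line cannot wake the system. A minimal sketch with an invented demo_wake_enable shadow register:

#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/bitops.h>

static unsigned long demo_wake_enable;	/* hypothetical wake-enable shadow */

static int demo_irq_set_wake(struct irq_data *d, unsigned int on)
{
	if (d->irq >= BITS_PER_LONG)	/* hypothetical: not wake-capable */
		return -ENODEV;

	if (on)
		demo_wake_enable |= BIT(d->irq);
	else
		demo_wake_enable &= ~BIT(d->irq);
	return 0;
}
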
diff --git a/arch/arm/mach-mmp/include/mach/mfp-mmp2.h b/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
index 117e303..4ad3862 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
@@ -6,7 +6,7 @@
 #define MFP_DRIVE_VERY_SLOW	(0x0 << 13)
 #define MFP_DRIVE_SLOW		(0x2 << 13)
 #define MFP_DRIVE_MEDIUM	(0x4 << 13)
-#define MFP_DRIVE_FAST		(0x8 << 13)
+#define MFP_DRIVE_FAST		(0x6 << 13)
 
 /* GPIO */
 #define GPIO0_GPIO	MFP_CFG(GPIO0, AF0)
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
index 7e8a80f..fbd7ee8 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
@@ -6,7 +6,7 @@
 #define MFP_DRIVE_VERY_SLOW	(0x0 << 13)
 #define MFP_DRIVE_SLOW		(0x2 << 13)
 #define MFP_DRIVE_MEDIUM	(0x4 << 13)
-#define MFP_DRIVE_FAST		(0x8 << 13)
+#define MFP_DRIVE_FAST		(0x6 << 13)
 
 /* UART2 */
 #define GPIO47_UART2_RXD	MFP_CFG(GPIO47, AF6)
diff --git a/arch/arm/mach-mmp/irq-mmp2.c b/arch/arm/mach-mmp/irq-mmp2.c
index 01342be..fa03703 100644
--- a/arch/arm/mach-mmp/irq-mmp2.c
+++ b/arch/arm/mach-mmp/irq-mmp2.c
@@ -20,48 +20,48 @@
 
 #include "common.h"
 
-static void icu_mask_irq(unsigned int irq)
+static void icu_mask_irq(struct irq_data *d)
 {
-	uint32_t r = __raw_readl(ICU_INT_CONF(irq));
+	uint32_t r = __raw_readl(ICU_INT_CONF(d->irq));
 
 	r &= ~ICU_INT_ROUTE_PJ4_IRQ;
-	__raw_writel(r, ICU_INT_CONF(irq));
+	__raw_writel(r, ICU_INT_CONF(d->irq));
 }
 
-static void icu_unmask_irq(unsigned int irq)
+static void icu_unmask_irq(struct irq_data *d)
 {
-	uint32_t r = __raw_readl(ICU_INT_CONF(irq));
+	uint32_t r = __raw_readl(ICU_INT_CONF(d->irq));
 
 	r |= ICU_INT_ROUTE_PJ4_IRQ;
-	__raw_writel(r, ICU_INT_CONF(irq));
+	__raw_writel(r, ICU_INT_CONF(d->irq));
 }
 
 static struct irq_chip icu_irq_chip = {
 	.name		= "icu_irq",
-	.mask		= icu_mask_irq,
-	.mask_ack	= icu_mask_irq,
-	.unmask		= icu_unmask_irq,
+	.irq_mask	= icu_mask_irq,
+	.irq_mask_ack	= icu_mask_irq,
+	.irq_unmask	= icu_unmask_irq,
 };
 
-static void pmic_irq_ack(unsigned int irq)
+static void pmic_irq_ack(struct irq_data *d)
 {
-	if (irq == IRQ_MMP2_PMIC)
+	if (d->irq == IRQ_MMP2_PMIC)
 		mmp2_clear_pmic_int();
 }
 
 #define SECOND_IRQ_MASK(_name_, irq_base, prefix)			\
-static void _name_##_mask_irq(unsigned int irq)				\
+static void _name_##_mask_irq(struct irq_data *d)			\
 {									\
 	uint32_t r;							\
-	r = __raw_readl(prefix##_MASK) | (1 << (irq - irq_base));	\
+	r = __raw_readl(prefix##_MASK) | (1 << (d->irq - irq_base));	\
 	__raw_writel(r, prefix##_MASK);					\
 }
 
 #define SECOND_IRQ_UNMASK(_name_, irq_base, prefix)			\
-static void _name_##_unmask_irq(unsigned int irq)			\
+static void _name_##_unmask_irq(struct irq_data *d)			\
 {									\
 	uint32_t r;							\
-	r = __raw_readl(prefix##_MASK) & ~(1 << (irq - irq_base));	\
+	r = __raw_readl(prefix##_MASK) & ~(1 << (d->irq - irq_base));	\
 	__raw_writel(r, prefix##_MASK);					\
 }
 
@@ -88,8 +88,8 @@
 SECOND_IRQ_DEMUX(_name_, irq_base, prefix)				\
 static struct irq_chip _name_##_irq_chip = {				\
 	.name		= #_name_,					\
-	.mask		= _name_##_mask_irq,				\
-	.unmask		= _name_##_unmask_irq,				\
+	.irq_mask	= _name_##_mask_irq,				\
+	.irq_unmask	= _name_##_unmask_irq,				\
 }
 
 SECOND_IRQ_CHIP(pmic, IRQ_MMP2_PMIC_BASE, MMP2_ICU_INT4);
@@ -103,10 +103,12 @@
 	int irq;
 
 	for (irq = start; num > 0; irq++, num--) {
+		struct irq_data *d = irq_get_irq_data(irq);
+
 		/* mask and clear the IRQ */
-		chip->mask(irq);
-		if (chip->ack)
-			chip->ack(irq);
+		chip->irq_mask(d);
+		if (chip->irq_ack)
+			chip->irq_ack(d);
 
 		set_irq_chip(irq, chip);
 		set_irq_flags(irq, IRQF_VALID);
@@ -119,7 +121,7 @@
 	int irq;
 
 	for (irq = 0; irq < IRQ_MMP2_MUX_BASE; irq++) {
-		icu_mask_irq(irq);
+		icu_mask_irq(irq_get_irq_data(irq));
 		set_irq_chip(irq, &icu_irq_chip);
 		set_irq_flags(irq, IRQF_VALID);
 
@@ -139,7 +141,7 @@
 	/* NOTE: IRQ_MMP2_PMIC requires the PMIC MFPR register
 	 * to be written to clear the interrupt
 	 */
-	pmic_irq_chip.ack = pmic_irq_ack;
+	pmic_irq_chip.irq_ack = pmic_irq_ack;
 
 	init_mux_irq(&pmic_irq_chip, IRQ_MMP2_PMIC_BASE, 2);
 	init_mux_irq(&rtc_irq_chip, IRQ_MMP2_RTC_BASE, 2);
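Two details worth noting in this file: init_mux_irq() can no longer call the chip with a plain number, so it fetches the per-interrupt irq_data with irq_get_irq_data() before masking and acking, and SECOND_IRQ_CHIP() stamps out one complete chip per secondary controller. Roughly what SECOND_IRQ_CHIP(pmic, IRQ_MMP2_PMIC_BASE, MMP2_ICU_INT4) expands to, given the helper macros above:

static void pmic_mask_irq(struct irq_data *d)
{
	uint32_t r = __raw_readl(MMP2_ICU_INT4_MASK) |
		     (1 << (d->irq - IRQ_MMP2_PMIC_BASE));
	__raw_writel(r, MMP2_ICU_INT4_MASK);
}

static void pmic_unmask_irq(struct irq_data *d)
{
	uint32_t r = __raw_readl(MMP2_ICU_INT4_MASK) &
		     ~(1 << (d->irq - IRQ_MMP2_PMIC_BASE));
	__raw_writel(r, MMP2_ICU_INT4_MASK);
}

static struct irq_chip pmic_irq_chip = {
	.name		= "pmic",
	.irq_mask	= pmic_mask_irq,
	.irq_unmask	= pmic_unmask_irq,
};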
diff --git a/arch/arm/mach-mmp/irq-pxa168.c b/arch/arm/mach-mmp/irq-pxa168.c
index 52ff2f0..f86b450 100644
--- a/arch/arm/mach-mmp/irq-pxa168.c
+++ b/arch/arm/mach-mmp/irq-pxa168.c
@@ -25,21 +25,21 @@
 #define PRIORITY_DEFAULT	0x1
 #define PRIORITY_NONE		0x0	/* means IRQ disabled */
 
-static void icu_mask_irq(unsigned int irq)
+static void icu_mask_irq(struct irq_data *d)
 {
-	__raw_writel(PRIORITY_NONE, ICU_INT_CONF(irq));
+	__raw_writel(PRIORITY_NONE, ICU_INT_CONF(d->irq));
 }
 
-static void icu_unmask_irq(unsigned int irq)
+static void icu_unmask_irq(struct irq_data *d)
 {
-	__raw_writel(IRQ_ROUTE_TO_AP | PRIORITY_DEFAULT, ICU_INT_CONF(irq));
+	__raw_writel(IRQ_ROUTE_TO_AP | PRIORITY_DEFAULT, ICU_INT_CONF(d->irq));
 }
 
 static struct irq_chip icu_irq_chip = {
-	.name	= "icu_irq",
-	.ack	= icu_mask_irq,
-	.mask	= icu_mask_irq,
-	.unmask	= icu_unmask_irq,
+	.name		= "icu_irq",
+	.irq_ack	= icu_mask_irq,
+	.irq_mask	= icu_mask_irq,
+	.irq_unmask	= icu_unmask_irq,
 };
 
 void __init icu_init_irq(void)
@@ -47,7 +47,7 @@
 	int irq;
 
 	for (irq = 0; irq < 64; irq++) {
-		icu_mask_irq(irq);
+		icu_mask_irq(irq_get_irq_data(irq));
 		set_irq_chip(irq, &icu_irq_chip);
 		set_irq_handler(irq, handle_level_irq);
 		set_irq_flags(irq, IRQF_VALID);
diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c
index f8c09ef..a604ec1 100644
--- a/arch/arm/mach-msm/board-trout-gpio.c
+++ b/arch/arm/mach-msm/board-trout-gpio.c
@@ -113,52 +113,52 @@
 	TROUT_GPIO_BANK("VIRTUAL", 0x12, TROUT_GPIO_VIRTUAL_BASE, 0),
 };
 
-static void trout_gpio_irq_ack(unsigned int irq)
+static void trout_gpio_irq_ack(struct irq_data *d)
 {
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 	int reg = TROUT_BANK_TO_STAT_REG(bank);
-	/*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", irq);*/
+	/*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", d->irq);*/
 	writeb(mask, TROUT_CPLD_BASE + reg);
 }
 
-static void trout_gpio_irq_mask(unsigned int irq)
+static void trout_gpio_irq_mask(struct irq_data *d)
 {
 	unsigned long flags;
 	uint8_t reg_val;
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 	int reg = TROUT_BANK_TO_MASK_REG(bank);
 
 	local_irq_save(flags);
 	reg_val = trout_int_mask[bank] |= mask;
 	/*printk(KERN_INFO "trout_gpio_irq_mask irq %d => %d:%02x\n",
-	       irq, bank, reg_val);*/
+	       d->irq, bank, reg_val);*/
 	writeb(reg_val, TROUT_CPLD_BASE + reg);
 	local_irq_restore(flags);
 }
 
-static void trout_gpio_irq_unmask(unsigned int irq)
+static void trout_gpio_irq_unmask(struct irq_data *d)
 {
 	unsigned long flags;
 	uint8_t reg_val;
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 	int reg = TROUT_BANK_TO_MASK_REG(bank);
 
 	local_irq_save(flags);
 	reg_val = trout_int_mask[bank] &= ~mask;
 	/*printk(KERN_INFO "trout_gpio_irq_unmask irq %d => %d:%02x\n",
-	       irq, bank, reg_val);*/
+	       d->irq, bank, reg_val);*/
 	writeb(reg_val, TROUT_CPLD_BASE + reg);
 	local_irq_restore(flags);
 }
 
-int trout_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+int trout_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned long flags;
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 
 	local_irq_save(flags);
 	if(on)
@@ -198,15 +198,15 @@
 		}
 		int_base += TROUT_INT_BANK0_COUNT;
 	}
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static struct irq_chip trout_gpio_irq_chip = {
-	.name      = "troutgpio",
-	.ack       = trout_gpio_irq_ack,
-	.mask      = trout_gpio_irq_mask,
-	.unmask    = trout_gpio_irq_unmask,
-	.set_wake  = trout_gpio_irq_set_wake,
+	.name          = "troutgpio",
+	.irq_ack       = trout_gpio_irq_ack,
+	.irq_mask      = trout_gpio_irq_mask,
+	.irq_unmask    = trout_gpio_irq_unmask,
+	.irq_set_wake  = trout_gpio_irq_set_wake,
 };
 
 /*
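The demux handler change in this file is the other recurring idiom of the series: chained flow handlers used to reach the parent chip through desc->chip, and now go through the embedded desc->irq_data. As a sketch, with the demux loop elided:

#include <linux/irq.h>

static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	/* ... read the bank status, generic_handle_irq() each set bit ... */

	chip->irq_ack(&desc->irq_data);		/* was desc->chip->ack(irq) */
}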
diff --git a/arch/arm/mach-msm/gpio.c b/arch/arm/mach-msm/gpio.c
index 33051b5..176af9d 100644
--- a/arch/arm/mach-msm/gpio.c
+++ b/arch/arm/mach-msm/gpio.c
@@ -225,21 +225,21 @@
 #endif
 };
 
-static void msm_gpio_irq_ack(unsigned int irq)
+static void msm_gpio_irq_ack(struct irq_data *d)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	msm_gpio_clear_detect_status(msm_chip,
-				     irq - gpio_to_irq(msm_chip->chip.base));
+				     d->irq - gpio_to_irq(msm_chip->chip.base));
 	spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
 }
 
-static void msm_gpio_irq_mask(unsigned int irq)
+static void msm_gpio_irq_mask(struct irq_data *d)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	/* level triggered interrupts are also latched */
@@ -250,11 +250,11 @@
 	spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
 }
 
-static void msm_gpio_irq_unmask(unsigned int irq)
+static void msm_gpio_irq_unmask(struct irq_data *d)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	/* level triggered interrupts are also latched */
@@ -265,11 +265,11 @@
 	spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
 }
 
-static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 
@@ -282,21 +282,21 @@
 	return 0;
 }
 
-static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 	unsigned val, mask = BIT(offset);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	val = readl(msm_chip->regs.int_edge);
 	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
 		writel(val | mask, msm_chip->regs.int_edge);
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	} else {
 		writel(val & ~mask, msm_chip->regs.int_edge);
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 	if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
 		msm_chip->both_edge_detect |= mask;
@@ -333,16 +333,16 @@
 					   msm_chip->chip.base + j);
 		}
 	}
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static struct irq_chip msm_gpio_irq_chip = {
-	.name      = "msmgpio",
-	.ack       = msm_gpio_irq_ack,
-	.mask      = msm_gpio_irq_mask,
-	.unmask    = msm_gpio_irq_unmask,
-	.set_wake  = msm_gpio_irq_set_wake,
-	.set_type  = msm_gpio_irq_set_type,
+	.name          = "msmgpio",
+	.irq_ack       = msm_gpio_irq_ack,
+	.irq_mask      = msm_gpio_irq_mask,
+	.irq_unmask    = msm_gpio_irq_unmask,
+	.irq_set_wake  = msm_gpio_irq_set_wake,
+	.irq_set_type  = msm_gpio_irq_set_type,
 };
 
 static int __init msm_init_gpio(void)
diff --git a/arch/arm/mach-msm/irq-vic.c b/arch/arm/mach-msm/irq-vic.c
index 99f2c34..68c28bb 100644
--- a/arch/arm/mach-msm/irq-vic.c
+++ b/arch/arm/mach-msm/irq-vic.c
@@ -226,19 +226,18 @@
 		writel(val, base + (i * 4));
 }
 
-static void msm_irq_ack(unsigned int irq)
+static void msm_irq_ack(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, irq);
-	irq = 1 << (irq & 31);
-	writel(irq, reg);
+	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, d->irq);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static void msm_irq_mask(unsigned int irq)
+static void msm_irq_mask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, irq);
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	uint32_t mask = 1UL << (irq & 31);
-	int smsm_irq = msm_irq_to_smsm[irq];
+	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, d->irq);
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	uint32_t mask = 1UL << (d->irq & 31);
+	int smsm_irq = msm_irq_to_smsm[d->irq];
 
 	msm_irq_shadow_reg[index].int_en[0] &= ~mask;
 	writel(mask, reg);
@@ -250,12 +249,12 @@
 	}
 }
 
-static void msm_irq_unmask(unsigned int irq)
+static void msm_irq_unmask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, irq);
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	uint32_t mask = 1UL << (irq & 31);
-	int smsm_irq = msm_irq_to_smsm[irq];
+	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, d->irq);
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	uint32_t mask = 1UL << (d->irq & 31);
+	int smsm_irq = msm_irq_to_smsm[d->irq];
 
 	msm_irq_shadow_reg[index].int_en[0] |= mask;
 	writel(mask, reg);
@@ -268,14 +267,14 @@
 	}
 }
 
-static int msm_irq_set_wake(unsigned int irq, unsigned int on)
+static int msm_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	uint32_t mask = 1UL << (irq & 31);
-	int smsm_irq = msm_irq_to_smsm[irq];
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	uint32_t mask = 1UL << (d->irq & 31);
+	int smsm_irq = msm_irq_to_smsm[d->irq];
 
 	if (smsm_irq == 0) {
-		printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", irq);
+		printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", d->irq);
 		return -EINVAL;
 	}
 	if (on)
@@ -294,12 +293,12 @@
 	return 0;
 }
 
-static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, irq);
-	void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, irq);
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	int b = 1 << (irq & 31);
+	void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, d->irq);
+	void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, d->irq);
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	int b = 1 << (d->irq & 31);
 	uint32_t polarity;
 	uint32_t type;
 
@@ -314,11 +313,11 @@
 	type = msm_irq_shadow_reg[index].int_type;
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
 		type |= b;
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	}
 	if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
 		type &= ~b;
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 	writel(type, treg);
 	msm_irq_shadow_reg[index].int_type = type;
@@ -326,13 +325,13 @@
 }
 
 static struct irq_chip msm_irq_chip = {
-	.name      = "msm",
-	.disable   = msm_irq_mask,
-	.ack       = msm_irq_ack,
-	.mask      = msm_irq_mask,
-	.unmask    = msm_irq_unmask,
-	.set_wake  = msm_irq_set_wake,
-	.set_type  = msm_irq_set_type,
+	.name          = "msm",
+	.irq_disable   = msm_irq_mask,
+	.irq_ack       = msm_irq_ack,
+	.irq_mask      = msm_irq_mask,
+	.irq_unmask    = msm_irq_unmask,
+	.irq_set_wake  = msm_irq_set_wake,
+	.irq_set_type  = msm_irq_set_type,
 };
 
 void __init msm_init_irq(void)
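Besides the irq_data conversion, msm_irq_ack() drops the old trick of reusing the irq argument as the bit mask after the register address had been computed; the write is now formed inline from d->irq. Restated as a hedged sketch (the file itself goes through the VIC_INT_TO_REG_ADDR macro): 32 sources share one clear register, so the word offset comes from irq/32 and the bit from irq%32:

#include <linux/io.h>

static inline void example_vic_ack(void __iomem *clear_base, unsigned int irq)
{
	writel(1U << (irq & 31), clear_base + (irq >> 5) * 4);
}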
diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c
index 6c8d5f8..0b27d89 100644
--- a/arch/arm/mach-msm/irq.c
+++ b/arch/arm/mach-msm/irq.c
@@ -64,35 +64,34 @@
 #define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4))
 #define VIC_VECTADDR(n)     VIC_REG(0x0400+((n) * 4))
 
-static void msm_irq_ack(unsigned int irq)
+static void msm_irq_ack(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_CLEAR0 + ((irq & 32) ? 4 : 0);
-	irq = 1 << (irq & 31);
-	writel(irq, reg);
+	void __iomem *reg = VIC_INT_CLEAR0 + ((d->irq & 32) ? 4 : 0);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static void msm_irq_mask(unsigned int irq)
+static void msm_irq_mask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_ENCLEAR0 + ((irq & 32) ? 4 : 0);
-	writel(1 << (irq & 31), reg);
+	void __iomem *reg = VIC_INT_ENCLEAR0 + ((d->irq & 32) ? 4 : 0);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static void msm_irq_unmask(unsigned int irq)
+static void msm_irq_unmask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_ENSET0 + ((irq & 32) ? 4 : 0);
-	writel(1 << (irq & 31), reg);
+	void __iomem *reg = VIC_INT_ENSET0 + ((d->irq & 32) ? 4 : 0);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static int msm_irq_set_wake(unsigned int irq, unsigned int on)
+static int msm_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	return -EINVAL;
 }
 
-static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	void __iomem *treg = VIC_INT_TYPE0 + ((irq & 32) ? 4 : 0);
-	void __iomem *preg = VIC_INT_POLARITY0 + ((irq & 32) ? 4 : 0);
-	int b = 1 << (irq & 31);
+	void __iomem *treg = VIC_INT_TYPE0 + ((d->irq & 32) ? 4 : 0);
+	void __iomem *preg = VIC_INT_POLARITY0 + ((d->irq & 32) ? 4 : 0);
+	int b = 1 << (d->irq & 31);
 
 	if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
 		writel(readl(preg) | b, preg);
@@ -101,22 +100,22 @@
 
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
 		writel(readl(treg) | b, treg);
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	}
 	if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
 		writel(readl(treg) & (~b), treg);
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 	return 0;
 }
 
 static struct irq_chip msm_irq_chip = {
-	.name      = "msm",
-	.ack       = msm_irq_ack,
-	.mask      = msm_irq_mask,
-	.unmask    = msm_irq_unmask,
-	.set_wake  = msm_irq_set_wake,
-	.set_type  = msm_irq_set_type,
+	.name          = "msm",
+	.irq_ack       = msm_irq_ack,
+	.irq_mask      = msm_irq_mask,
+	.irq_unmask    = msm_irq_unmask,
+	.irq_set_wake  = msm_irq_set_wake,
+	.irq_set_type  = msm_irq_set_type,
 };
 
 void __init msm_init_irq(void)
diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c
index 152eefd..11b54c7 100644
--- a/arch/arm/mach-msm/sirc.c
+++ b/arch/arm/mach-msm/sirc.c
@@ -42,12 +42,11 @@
 
 /* Mask off the given interrupt. Keep the int_enable mask in sync with
    the enable reg, so it can be restored after power collapse. */
-static void sirc_irq_mask(unsigned int irq)
+static void sirc_irq_mask(struct irq_data *d)
 {
 	unsigned int mask;
 
-
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	writel(mask, sirc_regs.int_enable_clear);
 	int_enable &= ~mask;
 	return;
@@ -55,31 +54,31 @@
 
 /* Unmask the given interrupt. Keep the int_enable mask in sync with
    the enable reg, so it can be restored after power collapse. */
-static void sirc_irq_unmask(unsigned int irq)
+static void sirc_irq_unmask(struct irq_data *d)
 {
 	unsigned int mask;
 
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	writel(mask, sirc_regs.int_enable_set);
 	int_enable |= mask;
 	return;
 }
 
-static void sirc_irq_ack(unsigned int irq)
+static void sirc_irq_ack(struct irq_data *d)
 {
 	unsigned int mask;
 
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	writel(mask, sirc_regs.int_clear);
 	return;
 }
 
-static int sirc_irq_set_wake(unsigned int irq, unsigned int on)
+static int sirc_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned int mask;
 
 	/* Used to set the interrupt enable mask during power collapse. */
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	if (on)
 		wake_enable |= mask;
 	else
@@ -88,12 +87,12 @@
 	return 0;
 }
 
-static int sirc_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int sirc_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
 	unsigned int mask;
 	unsigned int val;
 
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	val = readl(sirc_regs.int_polarity);
 
 	if (flow_type & (IRQF_TRIGGER_LOW | IRQF_TRIGGER_FALLING))
@@ -106,10 +105,10 @@
 	val = readl(sirc_regs.int_type);
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
 		val |= mask;
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	} else {
 		val &= ~mask;
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 
 	writel(val, sirc_regs.int_type);
@@ -139,16 +138,16 @@
 		;
 	generic_handle_irq(sirq+FIRST_SIRC_IRQ);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static struct irq_chip sirc_irq_chip = {
-	.name      = "sirc",
-	.ack       = sirc_irq_ack,
-	.mask      = sirc_irq_mask,
-	.unmask    = sirc_irq_unmask,
-	.set_wake  = sirc_irq_set_wake,
-	.set_type  = sirc_irq_set_type,
+	.name          = "sirc",
+	.irq_ack       = sirc_irq_ack,
+	.irq_mask      = sirc_irq_mask,
+	.irq_unmask    = sirc_irq_unmask,
+	.irq_set_wake  = sirc_irq_set_wake,
+	.irq_set_type  = sirc_irq_set_type,
 };
 
 void __init msm_init_sirc(void)
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index 899a969..0d65db8 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -147,10 +147,10 @@
 		.init_data = &pwgtx_init,
 	}, {
 
-		.id = MC13783_REGU_GPO1, /* Turn on 1.8V */
+		.id = MC13783_REG_GPO1, /* Turn on 1.8V */
 		.init_data = &gpo_init,
 	}, {
-		.id = MC13783_REGU_GPO3, /* Turn on 3.3V */
+		.id = MC13783_REG_GPO3, /* Turn on 3.3V */
 		.init_data = &gpo_init,
 	},
 };
diff --git a/arch/arm/mach-mx3/mach-mx31ads.c b/arch/arm/mach-mx3/mach-mx31ads.c
index b993b9b..88b97d6 100644
--- a/arch/arm/mach-mx3/mach-mx31ads.c
+++ b/arch/arm/mach-mx3/mach-mx31ads.c
@@ -162,9 +162,9 @@
  * Disable an expio pin's interrupt by setting the bit in the imr.
  * @param irq           an expio virtual irq number
  */
-static void expio_mask_irq(u32 irq)
+static void expio_mask_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 	/* mask the interrupt */
 	__raw_writew(1 << expio, PBC_INTMASK_CLEAR_REG);
 	__raw_readw(PBC_INTMASK_CLEAR_REG);
@@ -174,9 +174,9 @@
  * Acknowledge an expanded io pin's interrupt by clearing the bit in the isr.
  * @param irq           an expanded io virtual irq number
  */
-static void expio_ack_irq(u32 irq)
+static void expio_ack_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 	/* clear the interrupt status */
 	__raw_writew(1 << expio, PBC_INTSTATUS_REG);
 }
@@ -185,18 +185,18 @@
  * Enable an expio pin's interrupt by clearing the bit in the imr.
  * @param irq           an expio virtual irq number
  */
-static void expio_unmask_irq(u32 irq)
+static void expio_unmask_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 	/* unmask the interrupt */
 	__raw_writew(1 << expio, PBC_INTMASK_SET_REG);
 }
 
 static struct irq_chip expio_irq_chip = {
 	.name = "EXPIO(CPLD)",
-	.ack = expio_ack_irq,
-	.mask = expio_mask_irq,
-	.unmask = expio_unmask_irq,
+	.irq_ack = expio_ack_irq,
+	.irq_mask = expio_mask_irq,
+	.irq_unmask = expio_unmask_irq,
 };
 
 static void __init mx31ads_init_expio(void)
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
index 55254b6..de4fa992f 100644
--- a/arch/arm/mach-mx5/Kconfig
+++ b/arch/arm/mach-mx5/Kconfig
@@ -50,6 +50,7 @@
 config MACH_MX51_3DS
 	bool "Support MX51PDK (3DS)"
 	select SOC_IMX51
+	select IMX_HAVE_PLATFORM_IMX_KEYPAD
 	select IMX_HAVE_PLATFORM_IMX_UART
 	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 	select IMX_HAVE_PLATFORM_SPI_IMX
@@ -77,6 +78,7 @@
 config MACH_EUKREA_MBIMX51_BASEBOARD
 	prompt "Eukrea MBIMX51 development board"
 	bool
+	select IMX_HAVE_PLATFORM_IMX_KEYPAD
 	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 	help
 	  This adds board specific devices that can be found on Eukrea's
@@ -124,10 +126,28 @@
 	bool "Support MX53 EVK platforms"
 	select SOC_IMX53
 	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_SPI_IMX
 	help
 	  Include support for MX53 EVK platform. This includes specific
 	  configurations for the board and its peripherals.
 
+config MACH_MX53_SMD
+	bool "Support MX53 SMD platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX_UART
+	help
+	  Include support for MX53 SMD platform. This includes specific
+	  configurations for the board and its peripherals.
+
+config MACH_MX53_LOCO
+	bool "Support MX53 LOCO platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX_UART
+	help
+	  Include support for MX53 LOCO platform. This includes specific
+	  configurations for the board and its peripherals.
 
 config MACH_MX50_RDP
 	bool "Support MX50 reference design platform"
diff --git a/arch/arm/mach-mx5/Makefile b/arch/arm/mach-mx5/Makefile
index 0c398ba..0d43be9 100644
--- a/arch/arm/mach-mx5/Makefile
+++ b/arch/arm/mach-mx5/Makefile
@@ -10,6 +10,8 @@
 obj-$(CONFIG_MACH_MX51_BABBAGE) += board-mx51_babbage.o
 obj-$(CONFIG_MACH_MX51_3DS) += board-mx51_3ds.o
 obj-$(CONFIG_MACH_MX53_EVK) += board-mx53_evk.o
+obj-$(CONFIG_MACH_MX53_SMD) += board-mx53_smd.o
+obj-$(CONFIG_MACH_MX53_LOCO) += board-mx53_loco.o
 obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
 obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
 obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += board-cpuimx51sd.o
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index e42bd2e..49d6448 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -12,7 +12,6 @@
 
 #include <linux/irq.h>
 #include <linux/platform_device.h>
-#include <linux/input/matrix_keypad.h>
 #include <linux/spi/spi.h>
 
 #include <asm/mach-types.h>
@@ -120,14 +119,14 @@
 	KEY(3, 5, KEY_BACK)
 };
 
-static struct matrix_keymap_data mx51_3ds_map_data = {
+static const struct matrix_keymap_data mx51_3ds_map_data __initconst = {
 	.keymap		= mx51_3ds_board_keymap,
 	.keymap_size	= ARRAY_SIZE(mx51_3ds_board_keymap),
 };
 
 static void mxc_init_keypad(void)
 {
-	mxc_register_device(&mxc_keypad_device, &mx51_3ds_map_data);
+	imx51_add_imx_keypad(&mx51_3ds_map_data);
 }
 #else
 static inline void mxc_init_keypad(void)
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index fa97d0d..caee04c 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -21,6 +21,11 @@
 
 #include <linux/init.h>
 #include <linux/clk.h>
+#include <linux/fec.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
 #include <mach/common.h>
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -29,6 +34,10 @@
 #include <mach/imx-uart.h>
 #include <mach/iomux-mx53.h>
 
+#define SMD_FEC_PHY_RST		IMX_GPIO_NR(7, 6)
+#define EVK_ECSPI1_CS0		IMX_GPIO_NR(2, 30)
+#define EVK_ECSPI1_CS1		IMX_GPIO_NR(3, 19)
+
 #include "crm_regs.h"
 #include "devices-imx53.h"
 
@@ -47,6 +56,14 @@
 	MX53_PAD_ATA_CS_1__UART3_RXD,
 	MX53_PAD_ATA_DA_1__UART3_CTS,
 	MX53_PAD_ATA_DA_2__UART3_RTS,
+
+	MX53_PAD_EIM_D16__CSPI1_SCLK,
+	MX53_PAD_EIM_D17__CSPI1_MISO,
+	MX53_PAD_EIM_D18__CSPI1_MOSI,
+
+	/* ecspi chip select lines */
+	MX53_PAD_EIM_EB2__GPIO_2_30,
+	MX53_PAD_EIM_D19__GPIO_3_19,
 };
 
 static const struct imxuart_platform_data mx53_evk_uart_pdata __initconst = {
@@ -60,11 +77,68 @@
 	imx53_add_imx_uart(2, &mx53_evk_uart_pdata);
 }
 
+static const struct imxi2c_platform_data mx53_evk_i2c_data __initconst = {
+	.bitrate = 100000,
+};
+
+static inline void mx53_evk_fec_reset(void)
+{
+	int ret;
+
+	/* reset FEC PHY */
+	ret = gpio_request(SMD_FEC_PHY_RST, "fec-phy-reset");
+	if (ret) {
+		printk(KERN_ERR "failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
+		return;
+	}
+	gpio_direction_output(SMD_FEC_PHY_RST, 0);
+	gpio_set_value(SMD_FEC_PHY_RST, 0);
+	msleep(1);
+	gpio_set_value(SMD_FEC_PHY_RST, 1);
+}
+
+static struct fec_platform_data mx53_evk_fec_pdata = {
+	.phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static struct spi_board_info mx53_evk_spi_board_info[] __initdata = {
+	{
+		.modalias = "mtd_dataflash",
+		.max_speed_hz = 25000000,
+		.bus_num = 0,
+		.chip_select = 1,
+		.mode = SPI_MODE_0,
+		.platform_data = NULL,
+	},
+};
+
+static int mx53_evk_spi_cs[] = {
+	EVK_ECSPI1_CS0,
+	EVK_ECSPI1_CS1,
+};
+
+static const struct spi_imx_master mx53_evk_spi_data __initconst = {
+	.chipselect     = mx53_evk_spi_cs,
+	.num_chipselect = ARRAY_SIZE(mx53_evk_spi_cs),
+};
+
 static void __init mx53_evk_board_init(void)
 {
 	mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads,
 					ARRAY_SIZE(mx53_evk_pads));
 	mx53_evk_init_uart();
+	mx53_evk_fec_reset();
+	imx53_add_fec(&mx53_evk_fec_pdata);
+
+	imx53_add_imx_i2c(0, &mx53_evk_i2c_data);
+	imx53_add_imx_i2c(1, &mx53_evk_i2c_data);
+
+	imx53_add_sdhci_esdhc_imx(0, NULL);
+	imx53_add_sdhci_esdhc_imx(1, NULL);
+
+	spi_register_board_info(mx53_evk_spi_board_info,
+		ARRAY_SIZE(mx53_evk_spi_board_info));
+	imx53_add_ecspi(0, &mx53_evk_spi_data);
 }
 
 static void __init mx53_evk_timer_init(void)
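The FEC PHY reset helper added here (note the SMD_ prefix leaking into this EVK file; the SMD and LOCO boards below repeat the same helper) pulses the reset line low for about a millisecond before the FEC device is registered. In isolation, assuming the active-low reset the code implies:

#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>

static void example_fec_phy_reset(unsigned int gpio)
{
	/* a failed request is only logged; the board still boots without FEC */
	if (gpio_request(gpio, "fec-phy-reset")) {
		pr_err("failed to get fec-phy-reset GPIO\n");
		return;
	}
	gpio_direction_output(gpio, 0);	/* assert reset */
	msleep(1);
	gpio_set_value(gpio, 1);	/* release reset */
}

On the SPI side, .chip_select = 1 in the board info indexes the mx53_evk_spi_cs array, so the dataflash sits behind the EVK_ECSPI1_CS1 GPIO rather than a native chip select (assuming the spi_imx convention that non-negative chipselect entries are GPIO numbers).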
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
new file mode 100644
index 0000000..d1348e0
--- /dev/null
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/fec.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/imx-uart.h>
+#include <mach/iomux-mx53.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+#include "crm_regs.h"
+#include "devices-imx53.h"
+
+#define LOCO_FEC_PHY_RST		IMX_GPIO_NR(7, 6)
+
+static iomux_v3_cfg_t mx53_loco_pads[] = {
+	MX53_PAD_CSI0_D10__UART1_TXD,
+	MX53_PAD_CSI0_D11__UART1_RXD,
+	MX53_PAD_ATA_DIOW__UART1_TXD,
+	MX53_PAD_ATA_DMACK__UART1_RXD,
+
+	MX53_PAD_ATA_BUFFER_EN__UART2_RXD,
+	MX53_PAD_ATA_DMARQ__UART2_TXD,
+	MX53_PAD_ATA_DIOR__UART2_RTS,
+	MX53_PAD_ATA_INTRQ__UART2_CTS,
+
+	MX53_PAD_ATA_CS_0__UART3_TXD,
+	MX53_PAD_ATA_CS_1__UART3_RXD,
+	MX53_PAD_ATA_DA_1__UART3_CTS,
+	MX53_PAD_ATA_DA_2__UART3_RTS,
+};
+
+static const struct imxuart_platform_data mx53_loco_uart_data __initconst = {
+	.flags = IMXUART_HAVE_RTSCTS,
+};
+
+static inline void mx53_loco_init_uart(void)
+{
+	imx53_add_imx_uart(0, &mx53_loco_uart_data);
+	imx53_add_imx_uart(1, &mx53_loco_uart_data);
+	imx53_add_imx_uart(2, &mx53_loco_uart_data);
+}
+
+static inline void mx53_loco_fec_reset(void)
+{
+	int ret;
+
+	/* reset FEC PHY */
+	ret = gpio_request(LOCO_FEC_PHY_RST, "fec-phy-reset");
+	if (ret) {
+		printk(KERN_ERR "failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
+		return;
+	}
+	gpio_direction_output(LOCO_FEC_PHY_RST, 0);
+	msleep(1);
+	gpio_set_value(LOCO_FEC_PHY_RST, 1);
+}
+
+static struct fec_platform_data mx53_loco_fec_data = {
+	.phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static void __init mx53_loco_board_init(void)
+{
+	mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads,
+					ARRAY_SIZE(mx53_loco_pads));
+	mx53_loco_init_uart();
+	mx53_loco_fec_reset();
+	imx53_add_fec(&mx53_loco_fec_data);
+}
+
+static void __init mx53_loco_timer_init(void)
+{
+	mx53_clocks_init(32768, 24000000, 0, 0);
+}
+
+static struct sys_timer mx53_loco_timer = {
+	.init	= mx53_loco_timer_init,
+};
+
+MACHINE_START(MX53_LOCO, "Freescale MX53 LOCO Board")
+	.map_io = mx53_map_io,
+	.init_irq = mx53_init_irq,
+	.init_machine = mx53_loco_board_init,
+	.timer = &mx53_loco_timer,
+MACHINE_END
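The LOCO and SMD board files differ essentially only in the oscillator rates handed to the clock setup; the four arguments appear to be board crystal frequencies in Hz (assumed order ckil, osc, ckih1, ckih2, matching the mx51 variant of the call):

/*
 *   LOCO: mx53_clocks_init(32768, 24000000, 0, 0);
 *   SMD:  mx53_clocks_init(32768, 24000000, 22579200, 0);
 *
 * 22579200 Hz = 512 * 44100 Hz, i.e. the SMD board additionally feeds
 * an audio-rate master clock into the third input.
 */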
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
new file mode 100644
index 0000000..7970f7a
--- /dev/null
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/fec.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/imx-uart.h>
+#include <mach/iomux-mx53.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+#include "crm_regs.h"
+#include "devices-imx53.h"
+
+#define SMD_FEC_PHY_RST		IMX_GPIO_NR(7, 6)
+
+static iomux_v3_cfg_t mx53_smd_pads[] = {
+	MX53_PAD_CSI0_D10__UART1_TXD,
+	MX53_PAD_CSI0_D11__UART1_RXD,
+	MX53_PAD_ATA_DIOW__UART1_TXD,
+	MX53_PAD_ATA_DMACK__UART1_RXD,
+
+	MX53_PAD_ATA_BUFFER_EN__UART2_RXD,
+	MX53_PAD_ATA_DMARQ__UART2_TXD,
+	MX53_PAD_ATA_DIOR__UART2_RTS,
+	MX53_PAD_ATA_INTRQ__UART2_CTS,
+
+	MX53_PAD_ATA_CS_0__UART3_TXD,
+	MX53_PAD_ATA_CS_1__UART3_RXD,
+	MX53_PAD_ATA_DA_1__UART3_CTS,
+	MX53_PAD_ATA_DA_2__UART3_RTS,
+};
+
+static const struct imxuart_platform_data mx53_smd_uart_data __initconst = {
+	.flags = IMXUART_HAVE_RTSCTS,
+};
+
+static inline void mx53_smd_init_uart(void)
+{
+	imx53_add_imx_uart(0, &mx53_smd_uart_data);
+	imx53_add_imx_uart(1, &mx53_smd_uart_data);
+	imx53_add_imx_uart(2, &mx53_smd_uart_data);
+}
+
+static inline void mx53_smd_fec_reset(void)
+{
+	int ret;
+
+	/* reset FEC PHY */
+	ret = gpio_request(SMD_FEC_PHY_RST, "fec-phy-reset");
+	if (ret) {
+		printk(KERN_ERR "failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
+		return;
+	}
+	gpio_direction_output(SMD_FEC_PHY_RST, 0);
+	msleep(1);
+	gpio_set_value(SMD_FEC_PHY_RST, 1);
+}
+
+static struct fec_platform_data mx53_smd_fec_data = {
+	.phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static void __init mx53_smd_board_init(void)
+{
+	mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads,
+					ARRAY_SIZE(mx53_smd_pads));
+	mx53_smd_init_uart();
+	mx53_smd_fec_reset();
+	imx53_add_fec(&mx53_smd_fec_data);
+}
+
+static void __init mx53_smd_timer_init(void)
+{
+	mx53_clocks_init(32768, 24000000, 22579200, 0);
+}
+
+static struct sys_timer mx53_smd_timer = {
+	.init	= mx53_smd_timer_init,
+};
+
+MACHINE_START(MX53_SMD, "Freescale MX53 SMD Board")
+	.map_io = mx53_map_io,
+	.init_irq = mx53_init_irq,
+	.init_machine = mx53_smd_board_init,
+	.timer = &mx53_smd_timer,
+MACHINE_END
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 785e1a3..0a19e75 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1191,6 +1191,11 @@
 DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
 	NULL,  NULL, &ipg_clk, &gpt_ipg_clk);
 
+DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
+	NULL, NULL, &ipg_clk, NULL);
+DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
+	NULL, NULL, &ipg_clk, NULL);
+
 /* I2C */
 DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
 	NULL, NULL, &ipg_clk, NULL);
@@ -1283,6 +1288,8 @@
 	_REGISTER_CLOCK("imx-uart.2", NULL, uart3_clk)
 	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
 	_REGISTER_CLOCK("fec.0", NULL, fec_clk)
+	_REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
+	_REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
 	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
 	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
 	_REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
@@ -1295,7 +1302,7 @@
 	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
 	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
 	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
-	_REGISTER_CLOCK("imx-keypad.0", NULL, kpp_clk)
+	_REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
 	_REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
 	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
 	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
@@ -1326,6 +1333,13 @@
 	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
 	_REGISTER_CLOCK("fec.0", NULL, fec_clk)
 	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
+	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
+	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
+	_REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
+	_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
+	_REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk)
+	_REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk)
+	_REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk)
 };
 
 static void clk_tree_init(void)
@@ -1363,7 +1377,6 @@
 
 	clk_tree_init();
 
-	clk_set_parent(&uart_root_clk, &pll3_sw_clk);
 	clk_enable(&cpu_clk);
 	clk_enable(&main_bus_clk);
 
@@ -1406,6 +1419,7 @@
 
 	clk_tree_init();
 
+	clk_set_parent(&uart_root_clk, &pll3_sw_clk);
 	clk_enable(&cpu_clk);
 	clk_enable(&main_bus_clk);
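The DEFINE_CLOCK/_REGISTER_CLOCK pairs above gate each clock on a CCGR bit and publish it through clkdev under a (dev_id, con_id) key; a driver then resolves it with clk_get() against its own device name. A hedged sketch of the consumer side for the new pwm1 entry:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_pwm_probe(struct platform_device *pdev)
{
	/* matches _REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk) when the
	 * platform device is "mxc_pwm" with id 0 */
	struct clk *clk = clk_get(&pdev->dev, "pwm");

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return clk_enable(clk);
}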
 
diff --git a/arch/arm/mach-mx5/devices-imx51.h b/arch/arm/mach-mx5/devices-imx51.h
index 6302e46..7fff485 100644
--- a/arch/arm/mach-mx5/devices-imx51.h
+++ b/arch/arm/mach-mx5/devices-imx51.h
@@ -47,3 +47,11 @@
 extern const struct imx_imx2_wdt_data imx51_imx2_wdt_data[] __initconst;
 #define imx51_add_imx2_wdt(id, pdata)	\
 	imx_add_imx2_wdt(&imx51_imx2_wdt_data[id])
+
+extern const struct imx_mxc_pwm_data imx51_mxc_pwm_data[] __initconst;
+#define imx51_add_mxc_pwm(id)	\
+	imx_add_mxc_pwm(&imx51_mxc_pwm_data[id])
+
+extern const struct imx_imx_keypad_data imx51_imx_keypad_data __initconst;
+#define imx51_add_imx_keypad(pdata)	\
+	imx_add_imx_keypad(&imx51_imx_keypad_data, pdata)
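Boards stop registering the keypad through the shared mxc_keypad_device and use this helper instead; dropping the explicit device id is presumably why the clkdev entry in clock-mx51-mx53.c lost its ".0" suffix ("imx-keypad.0" becoming "imx-keypad"). Usage, mirroring the mx51_3ds and mbimx51 conversions (keymap contents hypothetical):

static const uint32_t example_keymap[] = {
	KEY(0, 0, KEY_SELECT),	/* row, column, keycode */
	KEY(0, 1, KEY_BACK),
};

static const struct matrix_keymap_data example_map_data __initconst = {
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static void __init example_init_keypad(void)
{
	imx51_add_imx_keypad(&example_map_data);
}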
diff --git a/arch/arm/mach-mx5/devices-imx53.h b/arch/arm/mach-mx5/devices-imx53.h
index 9d0ec25..8639735 100644
--- a/arch/arm/mach-mx5/devices-imx53.h
+++ b/arch/arm/mach-mx5/devices-imx53.h
@@ -8,6 +8,24 @@
 #include <mach/mx53.h>
 #include <mach/devices-common.h>
 
+extern const struct imx_fec_data imx53_fec_data __initconst;
+#define imx53_add_fec(pdata)   \
+	imx_add_fec(&imx53_fec_data, pdata)
+
 extern const struct imx_imx_uart_1irq_data imx53_imx_uart_data[] __initconst;
 #define imx53_add_imx_uart(id, pdata)	\
 	imx_add_imx_uart_1irq(&imx53_imx_uart_data[id], pdata)
+
+
+extern const struct imx_imx_i2c_data imx53_imx_i2c_data[] __initconst;
+#define imx53_add_imx_i2c(id, pdata)	\
+	imx_add_imx_i2c(&imx53_imx_i2c_data[id], pdata)
+
+extern const struct imx_sdhci_esdhc_imx_data
+imx53_sdhci_esdhc_imx_data[] __initconst;
+#define imx53_add_sdhci_esdhc_imx(id, pdata)	\
+	imx_add_sdhci_esdhc_imx(&imx53_sdhci_esdhc_imx_data[id], pdata)
+
+extern const struct imx_spi_imx_data imx53_ecspi_data[] __initconst;
+#define imx53_add_ecspi(id, pdata)	\
+	imx_add_spi_imx(&imx53_ecspi_data[id], pdata)
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index 1bda5cb..153ada5 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -120,25 +120,6 @@
 	},
 };
 
-static struct resource mxc_kpp_resources[] = {
-	{
-		.start = MX51_MXC_INT_KPP,
-		.end = MX51_MXC_INT_KPP,
-		.flags = IORESOURCE_IRQ,
-	} , {
-		.start = MX51_KPP_BASE_ADDR,
-		.end = MX51_KPP_BASE_ADDR + 0x8 - 1,
-		.flags = IORESOURCE_MEM,
-	},
-};
-
-struct platform_device mxc_keypad_device = {
-	.name = "imx-keypad",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(mxc_kpp_resources),
-	.resource = mxc_kpp_resources,
-};
-
 static struct mxc_gpio_port mxc_gpio_ports[] = {
 	{
 		.chip.label = "gpio-0",
diff --git a/arch/arm/mach-mx5/devices.h b/arch/arm/mach-mx5/devices.h
index 16891aa..55a5129 100644
--- a/arch/arm/mach-mx5/devices.h
+++ b/arch/arm/mach-mx5/devices.h
@@ -3,4 +3,3 @@
 extern struct platform_device mxc_usbh2_device;
 extern struct platform_device mxc_usbdr_udc_device;
 extern struct platform_device mxc_hsi2c_device;
-extern struct platform_device mxc_keypad_device;
diff --git a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
index c96d018..e83ffad 100644
--- a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
+++ b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
@@ -21,7 +21,6 @@
 #include <linux/fsl_devices.h>
 #include <linux/i2c/tsc2007.h>
 #include <linux/leds.h>
-#include <linux/input/matrix_keypad.h>
 
 #include <mach/common.h>
 #include <mach/hardware.h>
@@ -157,7 +156,7 @@
 	KEY(3, 3, KEY_ENTER),
 };
 
-static struct matrix_keymap_data mbimx51_map_data = {
+static const struct matrix_keymap_data mbimx51_map_data __initconst = {
 	.keymap		= mbimx51_keymap,
 	.keymap_size	= ARRAY_SIZE(mbimx51_keymap),
 };
@@ -209,7 +208,7 @@
 
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 
-	mxc_register_device(&mxc_keypad_device, &mbimx51_map_data);
+	imx51_add_imx_keypad(&mbimx51_map_data);
 
 	gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq");
 	gpio_direction_input(MBIMX51_TSC2007_GPIO);
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index c4ac7b4..8bfc8df 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -15,7 +15,7 @@
 config MACH_MX23EVK
 	bool "Support MX23EVK Platform"
 	select SOC_IMX23
-	select MXS_HAVE_PLATFORM_DUART
+	select MXS_HAVE_AMBA_DUART
 	default y
 	help
 	  Include support for MX23EVK platform. This includes specific
@@ -24,7 +24,7 @@
 config MACH_MX28EVK
 	bool "Support MX28EVK Platform"
 	select SOC_IMX28
-	select MXS_HAVE_PLATFORM_DUART
+	select MXS_HAVE_AMBA_DUART
 	select MXS_HAVE_PLATFORM_FEC
 	default y
 	help
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
index 8f5a19a..b1a362e 100644
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ b/arch/arm/mach-mxs/clock-mx23.c
@@ -21,6 +21,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
+#include <linux/clkdev.h>
 
 #include <asm/clkdev.h>
 #include <asm/div64.h>
@@ -437,10 +438,12 @@
 	},
 
 static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK("mxs-duart.0", NULL, uart_clk)
+	/* for amba bus driver */
+	_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
+	/* for amba-pl011 driver */
+	_REGISTER_CLOCK("duart", NULL, uart_clk)
 	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
 	_REGISTER_CLOCK(NULL, "hclk", hbus_clk)
-	_REGISTER_CLOCK(NULL, "xclk", xbus_clk)
 	_REGISTER_CLOCK(NULL, "usb", usb_clk)
 	_REGISTER_CLOCK(NULL, "audio", audio_clk)
 	_REGISTER_CLOCK(NULL, "pwm", pwm_clk)
@@ -518,6 +521,12 @@
 {
 	clk_misc_init();
 
+	clk_enable(&cpu_clk);
+	clk_enable(&hbus_clk);
+	clk_enable(&xbus_clk);
+	clk_enable(&emi_clk);
+	clk_enable(&uart_clk);
+
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 
 	mxs_timer_init(&clk32k_clk, MX23_INT_TIMER0);
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 74e2103..56312c0 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -21,6 +21,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
+#include <linux/clkdev.h>
 
 #include <asm/clkdev.h>
 #include <asm/div64.h>
@@ -602,7 +603,12 @@
 	},
 
 static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK("mxs-duart.0", NULL, uart_clk)
+	/* for amba bus driver */
+	_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
+	/* for amba-pl011 driver */
+	_REGISTER_CLOCK("duart", NULL, uart_clk)
+	_REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
+	_REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
 	_REGISTER_CLOCK("fec.0", NULL, fec_clk)
 	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
 	_REGISTER_CLOCK("pll2", NULL, pll2_clk)
@@ -726,6 +732,12 @@
 {
 	clk_misc_init();
 
+	clk_enable(&cpu_clk);
+	clk_enable(&hbus_clk);
+	clk_enable(&xbus_clk);
+	clk_enable(&emi_clk);
+	clk_enable(&uart_clk);
+
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 
 	mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);
diff --git a/arch/arm/mach-mxs/devices-mx23.h b/arch/arm/mach-mxs/devices-mx23.h
index d0f49fc..1256788 100644
--- a/arch/arm/mach-mxs/devices-mx23.h
+++ b/arch/arm/mach-mxs/devices-mx23.h
@@ -11,6 +11,6 @@
 #include <mach/mx23.h>
 #include <mach/devices-common.h>
 
-extern const struct mxs_duart_data mx23_duart_data __initconst;
+extern const struct amba_device mx23_duart_device __initconst;
 #define mx23_add_duart() \
-	mxs_add_duart(&mx23_duart_data)
+	mxs_add_duart(&mx23_duart_device)
diff --git a/arch/arm/mach-mxs/devices-mx28.h b/arch/arm/mach-mxs/devices-mx28.h
index 00b736c..33773a6 100644
--- a/arch/arm/mach-mxs/devices-mx28.h
+++ b/arch/arm/mach-mxs/devices-mx28.h
@@ -11,9 +11,9 @@
 #include <mach/mx28.h>
 #include <mach/devices-common.h>
 
-extern const struct mxs_duart_data mx28_duart_data __initconst;
+extern const struct amba_device mx28_duart_device __initconst;
 #define mx28_add_duart() \
-	mxs_add_duart(&mx28_duart_data)
+	mxs_add_duart(&mx28_duart_device)
 
 extern const struct mxs_fec_data mx28_fec_data[] __initconst;
 #define mx28_add_fec(id, pdata) \
diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c
index 6b60f02..c20d547 100644
--- a/arch/arm/mach-mxs/devices.c
+++ b/arch/arm/mach-mxs/devices.c
@@ -19,9 +19,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/err.h>
 #include <linux/platform_device.h>
-#include <mach/common.h>
+#include <linux/amba/bus.h>
 
 struct platform_device *__init mxs_add_platform_device_dmamask(
 		const char *name, int id,
@@ -73,3 +72,17 @@
 
 	return pdev;
 }
+
+int __init mxs_add_amba_device(const struct amba_device *dev)
+{
+	struct amba_device *adev = kmalloc(sizeof(*adev), GFP_KERNEL);
+
+	if (!adev) {
+		pr_err("%s: failed to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	*adev = *dev;
+
+	return amba_device_register(adev, &iomem_resource);
+}
diff --git a/arch/arm/mach-mxs/devices/Kconfig b/arch/arm/mach-mxs/devices/Kconfig
index a35a2dc..cf7dc1a 100644
--- a/arch/arm/mach-mxs/devices/Kconfig
+++ b/arch/arm/mach-mxs/devices/Kconfig
@@ -1,5 +1,6 @@
-config MXS_HAVE_PLATFORM_DUART
+config MXS_HAVE_AMBA_DUART
 	bool
+	select ARM_AMBA
 
 config MXS_HAVE_PLATFORM_FEC
 	bool
diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile
index 4b5266a..d0a09f6 100644
--- a/arch/arm/mach-mxs/devices/Makefile
+++ b/arch/arm/mach-mxs/devices/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_MXS_HAVE_PLATFORM_DUART) += platform-duart.o
+obj-$(CONFIG_MXS_HAVE_AMBA_DUART) += amba-duart.o
 obj-$(CONFIG_MXS_HAVE_PLATFORM_FEC) += platform-fec.o
diff --git a/arch/arm/mach-mxs/devices/amba-duart.c b/arch/arm/mach-mxs/devices/amba-duart.c
new file mode 100644
index 0000000..a559db0
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/amba-duart.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <asm/irq.h>
+#include <mach/mx23.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+#define MXS_AMBA_DUART_DEVICE(name, soc)			\
+const struct amba_device name##_device __initconst = {		\
+	.dev = {						\
+		.init_name = "duart",				\
+	},							\
+	.res = {						\
+		.start = soc ## _DUART_BASE_ADDR,		\
+		.end = (soc ## _DUART_BASE_ADDR) + SZ_8K - 1,	\
+		.flags = IORESOURCE_MEM,			\
+	},							\
+	.irq = {soc ## _INT_DUART, NO_IRQ},			\
+}
+
+#ifdef CONFIG_SOC_IMX23
+MXS_AMBA_DUART_DEVICE(mx23_duart, MX23);
+#endif
+
+#ifdef CONFIG_SOC_IMX28
+MXS_AMBA_DUART_DEVICE(mx28_duart, MX28);
+#endif
+
+int __init mxs_add_duart(const struct amba_device *dev)
+{
+	return mxs_add_amba_device(dev);
+}
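The DUART on these SoCs is an ARM PL011, so it moves from a platform device to the AMBA bus: amba_device_register() probes the peripheral ID out of the block's ID registers, and to do that the bus core clocks the block through the con_id "apb_pclk" — which is exactly why the mx23 and mx28 clk_lookup tables each gain a second "duart" entry. mxs_add_amba_device() copies the __initconst template into a kmalloc'd structure because the AMBA core keeps the device around after init memory is discarded. Roughly what the bus core does when the device binds (a sketch, not the actual amba/bus.c code):

#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/err.h>

static int example_amba_get_pclk(struct amba_device *adev)
{
	struct clk *pclk = clk_get(&adev->dev, "apb_pclk");

	if (IS_ERR(pclk))
		return PTR_ERR(pclk);
	return clk_enable(pclk);	/* needed to read the peripheral ID */
}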
diff --git a/arch/arm/mach-mxs/devices/platform-duart.c b/arch/arm/mach-mxs/devices/platform-duart.c
deleted file mode 100644
index 2fe0df5..0000000
--- a/arch/arm/mach-mxs/devices/platform-duart.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include <mach/mx23.h>
-#include <mach/mx28.h>
-#include <mach/devices-common.h>
-
-#define mxs_duart_data_entry(soc)					\
-	{								\
-		.iobase = soc ## _DUART_BASE_ADDR,			\
-		.irq = soc ## _INT_DUART,				\
-	}
-
-#ifdef CONFIG_SOC_IMX23
-const struct mxs_duart_data mx23_duart_data __initconst =
-	mxs_duart_data_entry(MX23);
-#endif
-
-#ifdef CONFIG_SOC_IMX28
-const struct mxs_duart_data mx28_duart_data __initconst =
-	mxs_duart_data_entry(MX28);
-#endif
-
-struct platform_device *__init mxs_add_duart(
-		const struct mxs_duart_data *data)
-{
-	struct resource res[] = {
-		{
-			.start = data->iobase,
-			.end = data->iobase + SZ_8K - 1,
-			.flags = IORESOURCE_MEM,
-		}, {
-			.start = data->irq,
-			.end = data->irq,
-			.flags = IORESOURCE_IRQ,
-		},
-	};
-
-	return mxs_add_platform_device("mxs-duart", 0, res, ARRAY_SIZE(res),
-					NULL, 0);
-}
diff --git a/arch/arm/mach-mxs/devices/platform-fec.c b/arch/arm/mach-mxs/devices/platform-fec.c
index c08168c..c42dff7 100644
--- a/arch/arm/mach-mxs/devices/platform-fec.c
+++ b/arch/arm/mach-mxs/devices/platform-fec.c
@@ -45,6 +45,6 @@
 		},
 	};
 
-	return mxs_add_platform_device("fec", data->id,
+	return mxs_add_platform_device("imx28-fec", data->id,
 			res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
 }
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 3da48d4..6c3d1a1 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/init.h>
+#include <linux/amba/bus.h>
 
 struct platform_device *mxs_add_platform_device_dmamask(
 		const char *name, int id,
@@ -24,14 +25,10 @@
 			name, id, res, num_resources, data, size_data, 0);
 }
 
+int __init mxs_add_amba_device(const struct amba_device *dev);
+
 /* duart */
-struct mxs_duart_data {
-	resource_size_t iobase;
-	resource_size_t iosize;
-	resource_size_t irq;
-};
-struct platform_device *__init mxs_add_duart(
-		const struct mxs_duart_data *data);
+int __init mxs_add_duart(const struct amba_device *dev);
 
 /* fec */
 #include <linux/fec.h>
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index d162e95..8e2c597 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -57,6 +57,19 @@
 		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
 	MX28_PAD_ENET_CLK__CLKCTRL_ENET |
 		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	/* fec1 */
+	MX28_PAD_ENET0_CRS__ENET1_RX_EN |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_RXD2__ENET1_RXD0 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_RXD3__ENET1_RXD1 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_COL__ENET1_TX_EN |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_TXD2__ENET1_TXD0 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_TXD3__ENET1_TXD1 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
 	/* phy power line */
 	MX28_PAD_SSP1_DATA3__GPIO_2_15 |
 		(MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
@@ -106,8 +119,14 @@
 	gpio_set_value(MX28EVK_FEC_PHY_RESET, 1);
 }
 
-static const struct fec_platform_data mx28_fec_pdata __initconst = {
-	.phy = PHY_INTERFACE_MODE_RMII,
+static struct fec_platform_data mx28_fec_pdata[] = {
+	{
+		/* fec0 */
+		.phy = PHY_INTERFACE_MODE_RMII,
+	}, {
+		/* fec1 */
+		.phy = PHY_INTERFACE_MODE_RMII,
+	},
 };
 
 static void __init mx28evk_init(void)
@@ -117,7 +136,8 @@
 	mx28_add_duart();
 
 	mx28evk_fec_reset();
-	mx28_add_fec(0, &mx28_fec_pdata);
+	mx28_add_fec(0, &mx28_fec_pdata[0]);
+	mx28_add_fec(1, &mx28_fec_pdata[1]);
 }
 
 static void __init mx28evk_timer_init(void)
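The second FEC on the mx28evk borrows the remaining ENET0 pins in their ENET1 alternate functions, renames the platform device to "imx28-fec" to match the new clkdev entries, and turns the platform data into a two-entry array (both PHYs are RMII). Each pad entry ORs the mux selection with drive, voltage and pull flags; one of the new fec1 pads in isolation (the iomux_cfg_t type name is assumed from the mxs iomux header):

static const iomux_cfg_t example_fec1_pad =
	MX28_PAD_ENET0_RXD2__ENET1_RXD0 |
	(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP);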
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 43da8bb..29ffa75 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -88,13 +88,13 @@
 }
 
 static int
-netx_hif_irq_type(unsigned int _irq, unsigned int type)
+netx_hif_irq_type(struct irq_data *d, unsigned int type)
 {
 	unsigned int val, irq;
 
 	val = readl(NETX_DPMAS_IF_CONF1);
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 
 	if (type & IRQ_TYPE_EDGE_RISING) {
 		DEBUG_IRQ("rising edges\n");
@@ -119,49 +119,49 @@
 }
 
 static void
-netx_hif_ack_irq(unsigned int _irq)
+netx_hif_ack_irq(struct irq_data *d)
 {
 	unsigned int val, irq;
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 	writel((1 << 24) << irq, NETX_DPMAS_INT_STAT);
 
 	val = readl(NETX_DPMAS_INT_EN);
 	val &= ~((1 << 24) << irq);
 	writel(val, NETX_DPMAS_INT_EN);
 
-	DEBUG_IRQ("%s: irq %d\n", __func__, _irq);
+	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
 }
 
 static void
-netx_hif_mask_irq(unsigned int _irq)
+netx_hif_mask_irq(struct irq_data *d)
 {
 	unsigned int val, irq;
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 	val = readl(NETX_DPMAS_INT_EN);
 	val &= ~((1 << 24) << irq);
 	writel(val, NETX_DPMAS_INT_EN);
-	DEBUG_IRQ("%s: irq %d\n", __func__, _irq);
+	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
 }
 
 static void
-netx_hif_unmask_irq(unsigned int _irq)
+netx_hif_unmask_irq(struct irq_data *d)
 {
 	unsigned int val, irq;
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 	val = readl(NETX_DPMAS_INT_EN);
 	val |= (1 << 24) << irq;
 	writel(val, NETX_DPMAS_INT_EN);
-	DEBUG_IRQ("%s: irq %d\n", __func__, _irq);
+	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
 }
 
 static struct irq_chip netx_hif_chip = {
-	.ack = netx_hif_ack_irq,
-	.mask = netx_hif_mask_irq,
-	.unmask = netx_hif_unmask_irq,
-	.set_type = netx_hif_irq_type,
+	.irq_ack = netx_hif_ack_irq,
+	.irq_mask = netx_hif_mask_irq,
+	.irq_unmask = netx_hif_unmask_irq,
+	.irq_set_type = netx_hif_irq_type,
 };
 
 void __init netx_init_irq(void)
diff --git a/arch/arm/mach-ns9xxx/board-a9m9750dev.c b/arch/arm/mach-ns9xxx/board-a9m9750dev.c
index b45bb3b..0c0d524 100644
--- a/arch/arm/mach-ns9xxx/board-a9m9750dev.c
+++ b/arch/arm/mach-ns9xxx/board-a9m9750dev.c
@@ -37,44 +37,44 @@
 		     ARRAY_SIZE(board_a9m9750dev_io_desc));
 }
 
-static void a9m9750dev_fpga_ack_irq(unsigned int irq)
+static void a9m9750dev_fpga_ack_irq(struct irq_data *d)
 {
 	/* nothing */
 }
 
-static void a9m9750dev_fpga_mask_irq(unsigned int irq)
+static void a9m9750dev_fpga_mask_irq(struct irq_data *d)
 {
 	u8 ier;
 
 	ier = __raw_readb(FPGA_IER);
 
-	ier &= ~(1 << (irq - FPGA_IRQ(0)));
+	ier &= ~(1 << (d->irq - FPGA_IRQ(0)));
 
 	__raw_writeb(ier, FPGA_IER);
 }
 
-static void a9m9750dev_fpga_maskack_irq(unsigned int irq)
+static void a9m9750dev_fpga_maskack_irq(struct irq_data *d)
 {
-	a9m9750dev_fpga_mask_irq(irq);
-	a9m9750dev_fpga_ack_irq(irq);
+	a9m9750dev_fpga_mask_irq(d);
+	a9m9750dev_fpga_ack_irq(d);
 }
 
-static void a9m9750dev_fpga_unmask_irq(unsigned int irq)
+static void a9m9750dev_fpga_unmask_irq(struct irq_data *d)
 {
 	u8 ier;
 
 	ier = __raw_readb(FPGA_IER);
 
-	ier |= 1 << (irq - FPGA_IRQ(0));
+	ier |= 1 << (d->irq - FPGA_IRQ(0));
 
 	__raw_writeb(ier, FPGA_IER);
 }
 
 static struct irq_chip a9m9750dev_fpga_chip = {
-	.ack		= a9m9750dev_fpga_ack_irq,
-	.mask		= a9m9750dev_fpga_mask_irq,
-	.mask_ack	= a9m9750dev_fpga_maskack_irq,
-	.unmask		= a9m9750dev_fpga_unmask_irq,
+	.irq_ack	= a9m9750dev_fpga_ack_irq,
+	.irq_mask	= a9m9750dev_fpga_mask_irq,
+	.irq_mask_ack	= a9m9750dev_fpga_maskack_irq,
+	.irq_unmask	= a9m9750dev_fpga_unmask_irq,
 };
 
 static void a9m9750dev_fpga_demux_handler(unsigned int irq,
@@ -82,7 +82,7 @@
 {
 	u8 stat = __raw_readb(FPGA_ISR);
 
-	desc->chip->mask_ack(irq);
+	desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
 
 	while (stat != 0) {
 		int irqno = fls(stat) - 1;
@@ -92,7 +92,7 @@
 		generic_handle_irq(FPGA_IRQ(irqno));
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 void __init board_a9m9750dev_init_irq(void)
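
The demux hunk above shows the matching change for chained handlers: the parent chip now hangs off desc->irq_data, and its callbacks take a pointer to that same irq_data rather than the irq number. Schematically, assuming a hypothetical FPGA status register FOO_ISR and irq mapping FOO_IRQ():

    #include <linux/irq.h>
    #include <linux/bitops.h>
    #include <linux/io.h>

    static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
    {
            struct irq_chip *chip = desc->irq_data.chip;
            u8 stat = __raw_readb(FOO_ISR);

            chip->irq_mask_ack(&desc->irq_data);    /* silence the parent */

            while (stat) {
                    int bit = fls(stat) - 1;

                    stat &= ~(1 << bit);
                    generic_handle_irq(FOO_IRQ(bit));
            }

            chip->irq_unmask(&desc->irq_data);      /* re-enable the parent */
    }
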
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index 038f24d..389fa5c 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -22,40 +22,40 @@
 #define irq2prio(i) (i)
 #define prio2irq(p) (p)
 
-static void ns9xxx_mask_irq(unsigned int irq)
+static void ns9xxx_mask_irq(struct irq_data *d)
 {
 	/* XXX: better use cpp symbols */
-	int prio = irq2prio(irq);
+	int prio = irq2prio(d->irq);
 	u32 ic = __raw_readl(SYS_IC(prio / 4));
 	ic &= ~(1 << (7 + 8 * (3 - (prio & 3))));
 	__raw_writel(ic, SYS_IC(prio / 4));
 }
 
-static void ns9xxx_ack_irq(unsigned int irq)
+static void ns9xxx_ack_irq(struct irq_data *d)
 {
 	__raw_writel(0, SYS_ISRADDR);
 }
 
-static void ns9xxx_maskack_irq(unsigned int irq)
+static void ns9xxx_maskack_irq(struct irq_data *d)
 {
-	ns9xxx_mask_irq(irq);
-	ns9xxx_ack_irq(irq);
+	ns9xxx_mask_irq(d);
+	ns9xxx_ack_irq(d);
 }
 
-static void ns9xxx_unmask_irq(unsigned int irq)
+static void ns9xxx_unmask_irq(struct irq_data *d)
 {
 	/* XXX: better use cpp symbols */
-	int prio = irq2prio(irq);
+	int prio = irq2prio(d->irq);
 	u32 ic = __raw_readl(SYS_IC(prio / 4));
 	ic |= 1 << (7 + 8 * (3 - (prio & 3)));
 	__raw_writel(ic, SYS_IC(prio / 4));
 }
 
 static struct irq_chip ns9xxx_chip = {
-	.ack		= ns9xxx_ack_irq,
-	.mask		= ns9xxx_mask_irq,
-	.mask_ack	= ns9xxx_maskack_irq,
-	.unmask		= ns9xxx_unmask_irq,
+	.irq_ack	= ns9xxx_ack_irq,
+	.irq_mask	= ns9xxx_mask_irq,
+	.irq_mask_ack	= ns9xxx_maskack_irq,
+	.irq_unmask	= ns9xxx_unmask_irq,
 };
 
 #if 0
@@ -92,10 +92,10 @@
 
 	if (desc->status & IRQ_DISABLED)
 out_mask:
-		desc->chip->mask(irq);
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	/* ack unconditionally to unmask lower prio irqs */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	raw_spin_unlock(&desc->lock);
 }
diff --git a/arch/arm/mach-nuc93x/irq.c b/arch/arm/mach-nuc93x/irq.c
index a7a88ea..1f8a05a 100644
--- a/arch/arm/mach-nuc93x/irq.c
+++ b/arch/arm/mach-nuc93x/irq.c
@@ -25,9 +25,9 @@
 #include <mach/hardware.h>
 #include <mach/regs-irq.h>
 
-static void nuc93x_irq_mask(unsigned int irq)
+static void nuc93x_irq_mask(struct irq_data *d)
 {
-	__raw_writel(1 << irq, REG_AIC_MDCR);
+	__raw_writel(1 << d->irq, REG_AIC_MDCR);
 }
 
 /*
@@ -35,21 +35,21 @@
  * to REG_AIC_EOSCR for ACK
  */
 
-static void nuc93x_irq_ack(unsigned int irq)
+static void nuc93x_irq_ack(struct irq_data *d)
 {
 	__raw_writel(0x01, REG_AIC_EOSCR);
 }
 
-static void nuc93x_irq_unmask(unsigned int irq)
+static void nuc93x_irq_unmask(struct irq_data *d)
 {
-	__raw_writel(1 << irq, REG_AIC_MECR);
+	__raw_writel(1 << d->irq, REG_AIC_MECR);
 
 }
 
 static struct irq_chip nuc93x_irq_chip = {
-	.ack	   = nuc93x_irq_ack,
-	.mask	   = nuc93x_irq_mask,
-	.unmask	   = nuc93x_irq_unmask,
+	.irq_ack	= nuc93x_irq_ack,
+	.irq_mask	= nuc93x_irq_mask,
+	.irq_unmask	= nuc93x_irq_unmask,
 };
 
 void __init nuc93x_init_irq(void)
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index 6c994e2..152b32c 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -49,7 +49,7 @@
 
 	irq_desc = irq_to_desc(IH_GPIO_BASE);
 	if (irq_desc)
-		irq_chip = irq_desc->chip;
+		irq_chip = irq_desc->irq_data.chip;
 
 	/*
 	 * For each handled GPIO interrupt, keep calling its interrupt handler
@@ -62,13 +62,15 @@
 
 		while (irq_counter[gpio] < fiq_count) {
 			if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
+				struct irq_data *d = irq_get_irq_data(irq_num);
+
 				/*
 				 * It looks like handle_edge_irq(), which
 				 * OMAP GPIO edge interrupts default to,
 				 * expects the interrupt to be unmasked already.
 				 */
-				if (irq_chip && irq_chip->unmask)
-					irq_chip->unmask(irq_num);
+				if (irq_chip && irq_chip->irq_unmask)
+					irq_chip->irq_unmask(d);
 			}
 			generic_handle_irq(irq_num);
 
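When code holds only an irq number, as the FIQ handler above does, irq_get_irq_data() bridges to the new callbacks. A small sketch of unmasking by number under that assumption:

    #include <linux/irq.h>

    static void unmask_by_number(unsigned int irq_num)
    {
            struct irq_data *d = irq_get_irq_data(irq_num);
            struct irq_chip *chip = d ? d->chip : NULL;

            if (chip && chip->irq_unmask)
                    chip->irq_unmask(d);
    }
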
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index bd0495a..22cc8c8 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -179,6 +179,22 @@
 	{ OMAP_TAG_LCD,		&ams_delta_lcd_config },
 };
 
+static struct resource ams_delta_nand_resources[] = {
+	[0] = {
+		.start	= OMAP1_MPUIO_BASE,
+		.end	= OMAP1_MPUIO_BASE +
+				OMAP_MPUIO_IO_CNTL + sizeof(u32) - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device ams_delta_nand_device = {
+	.name	= "ams-delta-nand",
+	.id	= -1,
+	.num_resources	= ARRAY_SIZE(ams_delta_nand_resources),
+	.resource	= ams_delta_nand_resources,
+};
+
 static struct resource ams_delta_kp_resources[] = {
 	[0] = {
 		.start	= INT_KEYBOARD,
@@ -265,6 +281,7 @@
 };
 
 static struct platform_device *ams_delta_devices[] __initdata = {
+	&ams_delta_nand_device,
 	&ams_delta_kp_device,
 	&ams_delta_lcd_device,
 	&ams_delta_led_device,
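
The ams-delta-nand addition is the stock static platform-device pattern: a resource table describing the device's register window, plus a platform_device whose .name must match the driver's and whose .id of -1 marks it as the only instance. Reduced to its shape, with a made-up device name and base address:

    #include <linux/platform_device.h>
    #include <linux/ioport.h>

    static struct resource foo_resources[] = {
            {
                    .start  = 0xfffb0000,                   /* example base */
                    .end    = 0xfffb0000 + 0x1000 - 1,      /* 4 KiB window */
                    .flags  = IORESOURCE_MEM,
            },
    };

    static struct platform_device foo_device = {
            .name           = "foo",        /* must match the driver name */
            .id             = -1,           /* single instance */
            .num_resources  = ARRAY_SIZE(foo_resources),
            .resource       = foo_resources,
    };
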
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c
index 8780e75..0ace799 100644
--- a/arch/arm/mach-omap1/fpga.c
+++ b/arch/arm/mach-omap1/fpga.c
@@ -30,9 +30,9 @@
 #include <plat/fpga.h>
 #include <mach/gpio.h>
 
-static void fpga_mask_irq(unsigned int irq)
+static void fpga_mask_irq(struct irq_data *d)
 {
-	irq -= OMAP_FPGA_IRQ_BASE;
+	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;
 
 	if (irq < 8)
 		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO)
@@ -58,14 +58,14 @@
 }
 
 
-static void fpga_ack_irq(unsigned int irq)
+static void fpga_ack_irq(struct irq_data *d)
 {
 	/* Don't need to explicitly ACK FPGA interrupts */
 }
 
-static void fpga_unmask_irq(unsigned int irq)
+static void fpga_unmask_irq(struct irq_data *d)
 {
-	irq -= OMAP_FPGA_IRQ_BASE;
+	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;
 
 	if (irq < 8)
 		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO) | (1 << irq)),
@@ -78,10 +78,10 @@
 			      | (1 << (irq - 16))), INNOVATOR_FPGA_IMR2);
 }
 
-static void fpga_mask_ack_irq(unsigned int irq)
+static void fpga_mask_ack_irq(struct irq_data *d)
 {
-	fpga_mask_irq(irq);
-	fpga_ack_irq(irq);
+	fpga_mask_irq(d);
+	fpga_ack_irq(d);
 }
 
 void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc)
@@ -105,17 +105,17 @@
 
 static struct irq_chip omap_fpga_irq_ack = {
 	.name		= "FPGA-ack",
-	.ack		= fpga_mask_ack_irq,
-	.mask		= fpga_mask_irq,
-	.unmask		= fpga_unmask_irq,
+	.irq_ack	= fpga_mask_ack_irq,
+	.irq_mask	= fpga_mask_irq,
+	.irq_unmask	= fpga_unmask_irq,
 };
 
 
 static struct irq_chip omap_fpga_irq = {
 	.name		= "FPGA",
-	.ack		= fpga_ack_irq,
-	.mask		= fpga_mask_irq,
-	.unmask		= fpga_unmask_irq,
+	.irq_ack	= fpga_ack_irq,
+	.irq_mask	= fpga_mask_irq,
+	.irq_unmask	= fpga_unmask_irq,
 };
 
 /*
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 6bddbc8..4770158 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -70,48 +70,48 @@
 	omap_writel(value, irq_banks[bank].base_reg + offset);
 }
 
-static void omap_ack_irq(unsigned int irq)
+static void omap_ack_irq(struct irq_data *d)
 {
-	if (irq > 31)
+	if (d->irq > 31)
 		omap_writel(0x1, OMAP_IH2_BASE + IRQ_CONTROL_REG_OFFSET);
 
 	omap_writel(0x1, OMAP_IH1_BASE + IRQ_CONTROL_REG_OFFSET);
 }
 
-static void omap_mask_irq(unsigned int irq)
+static void omap_mask_irq(struct irq_data *d)
 {
-	int bank = IRQ_BANK(irq);
+	int bank = IRQ_BANK(d->irq);
 	u32 l;
 
 	l = omap_readl(irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
-	l |= 1 << IRQ_BIT(irq);
+	l |= 1 << IRQ_BIT(d->irq);
 	omap_writel(l, irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
 }
 
-static void omap_unmask_irq(unsigned int irq)
+static void omap_unmask_irq(struct irq_data *d)
 {
-	int bank = IRQ_BANK(irq);
+	int bank = IRQ_BANK(d->irq);
 	u32 l;
 
 	l = omap_readl(irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
-	l &= ~(1 << IRQ_BIT(irq));
+	l &= ~(1 << IRQ_BIT(d->irq));
 	omap_writel(l, irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
 }
 
-static void omap_mask_ack_irq(unsigned int irq)
+static void omap_mask_ack_irq(struct irq_data *d)
 {
-	omap_mask_irq(irq);
-	omap_ack_irq(irq);
+	omap_mask_irq(d);
+	omap_ack_irq(d);
 }
 
-static int omap_wake_irq(unsigned int irq, unsigned int enable)
+static int omap_wake_irq(struct irq_data *d, unsigned int enable)
 {
-	int bank = IRQ_BANK(irq);
+	int bank = IRQ_BANK(d->irq);
 
 	if (enable)
-		irq_banks[bank].wake_enable |= IRQ_BIT(irq);
+		irq_banks[bank].wake_enable |= IRQ_BIT(d->irq);
 	else
-		irq_banks[bank].wake_enable &= ~IRQ_BIT(irq);
+		irq_banks[bank].wake_enable &= ~IRQ_BIT(d->irq);
 
 	return 0;
 }
@@ -168,10 +168,10 @@
 
 static struct irq_chip omap_irq_chip = {
 	.name		= "MPU",
-	.ack		= omap_mask_ack_irq,
-	.mask		= omap_mask_irq,
-	.unmask		= omap_unmask_irq,
-	.set_wake	= omap_wake_irq,
+	.irq_ack	= omap_mask_ack_irq,
+	.irq_mask	= omap_mask_irq,
+	.irq_unmask	= omap_unmask_irq,
+	.irq_set_wake	= omap_wake_irq,
 };
 
 void __init omap_init_irq(void)
@@ -239,9 +239,9 @@
 	/* Unmask level 2 handler */
 
 	if (cpu_is_omap7xx())
-		omap_unmask_irq(INT_7XX_IH2_IRQ);
+		omap_unmask_irq(irq_get_irq_data(INT_7XX_IH2_IRQ));
 	else if (cpu_is_omap15xx())
-		omap_unmask_irq(INT_1510_IH2_IRQ);
+		omap_unmask_irq(irq_get_irq_data(INT_1510_IH2_IRQ));
 	else if (cpu_is_omap16xx())
-		omap_unmask_irq(INT_1610_IH2_IRQ);
+		omap_unmask_irq(irq_get_irq_data(INT_1610_IH2_IRQ));
 }
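
set_wake follows the same conversion: it becomes .irq_set_wake with an irq_data argument, and internal callers such as omap_init_irq() now wrap bare numbers with irq_get_irq_data(), as the last hunk shows. In outline, with a hypothetical foo_gpio_wake() backend:

    static int foo_set_wake(struct irq_data *d, unsigned int on)
    {
            return foo_gpio_wake(d->irq, on);  /* d->irq replaces the old arg */
    }

    /* An internal caller that still starts from a number: */
    static void foo_enable_wake(unsigned int irq)
    {
            foo_set_wake(irq_get_irq_data(irq), 1);
    }
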
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index a70bdf2..07d1b20 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -554,6 +554,7 @@
 
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+	OMAP4_MUX(USBB2_ULPITLL_CLK, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #else
@@ -576,11 +577,12 @@
 	omap4_twl6030_hsmmc_init(mmc);
 
 	/* Power on the ULPI PHY */
-	if (gpio_is_valid(OMAP4SDP_MDM_PWR_EN_GPIO)) {
-		/* FIXME: Assumes pad is already muxed for GPIO mode */
-		gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
+	status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
+	if (status)
+		pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__);
+	else
 		gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1);
-	}
+
 	usb_ehci_init(&ehci_pdata);
 	usb_musb_init(&musb_board_data);
 
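The 4430sdp hunk replaces an unchecked gpio_request() (previously guarded only by gpio_is_valid() and a FIXME about pad muxing) with a checked request; the muxing itself is now handled by the OMAP4_MUX() board entry added above. The checked idiom, roughly, for a hypothetical power-enable GPIO:

    #include <linux/kernel.h>
    #include <linux/gpio.h>

    static int foo_power_on_phy(void)
    {
            int status = gpio_request(FOO_PWR_EN_GPIO, "foo PHY power");

            if (status) {
                    pr_err("%s: could not get PHY power GPIO\n", __func__);
                    return status;
            }
            return gpio_direction_output(FOO_PWR_EN_GPIO, 1);
    }
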
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index ebaa230..3be85a1 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -17,6 +17,7 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
+#include <linux/input.h>
 
 #include <linux/regulator/machine.h>
 #include <linux/regulator/fixed.h>
@@ -541,6 +542,37 @@
 	.audio = &igep2_audio_data,
 };
 
+static int igep2_keymap[] = {
+	KEY(0, 0, KEY_LEFT),
+	KEY(0, 1, KEY_RIGHT),
+	KEY(0, 2, KEY_A),
+	KEY(0, 3, KEY_B),
+	KEY(1, 0, KEY_DOWN),
+	KEY(1, 1, KEY_UP),
+	KEY(1, 2, KEY_E),
+	KEY(1, 3, KEY_F),
+	KEY(2, 0, KEY_ENTER),
+	KEY(2, 1, KEY_I),
+	KEY(2, 2, KEY_J),
+	KEY(2, 3, KEY_K),
+	KEY(3, 0, KEY_M),
+	KEY(3, 1, KEY_N),
+	KEY(3, 2, KEY_O),
+	KEY(3, 3, KEY_P)
+};
+
+static struct matrix_keymap_data igep2_keymap_data = {
+	.keymap			= igep2_keymap,
+	.keymap_size		= ARRAY_SIZE(igep2_keymap),
+};
+
+static struct twl4030_keypad_data igep2_keypad_pdata = {
+	.keymap_data	= &igep2_keymap_data,
+	.rows		= 4,
+	.cols		= 4,
+	.rep		= 1,
+};
+
 static struct twl4030_platform_data igep2_twldata = {
 	.irq_base	= TWL4030_IRQ_BASE,
 	.irq_end	= TWL4030_IRQ_END,
@@ -549,6 +581,7 @@
 	.usb		= &igep2_usb_data,
 	.codec		= &igep2_codec_data,
 	.gpio		= &igep2_twl4030_gpio_pdata,
+	.keypad		= &igep2_keypad_pdata,
 	.vmmc1          = &igep2_vmmc1,
 	.vpll2		= &igep2_vpll2,
 	.vio		= &igep2_vio,
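
The keypad block wires a 4x4 matrix into the TWL4030 keypad driver: each KEY(row, col, keycode) entry packs its matrix coordinates and key code into one word, matrix_keymap_data points the driver at that table, and twl4030_keypad_data adds the geometry and auto-repeat. A minimal 2x2 variant for a hypothetical board:

    #include <linux/input.h>
    #include <linux/input/matrix_keypad.h>

    static const uint32_t foo_keymap[] = {
            KEY(0, 0, KEY_UP),      /* row 0, column 0 */
            KEY(0, 1, KEY_DOWN),
            KEY(1, 0, KEY_LEFT),
            KEY(1, 1, KEY_RIGHT),
    };

    static const struct matrix_keymap_data foo_keymap_data = {
            .keymap         = foo_keymap,
            .keymap_size    = ARRAY_SIZE(foo_keymap),
    };
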
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
index bcccd68..4dc62a9 100644
--- a/arch/arm/mach-omap2/board-igep0030.c
+++ b/arch/arm/mach-omap2/board-igep0030.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 
 #include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 #include <linux/i2c/twl.h>
 #include <linux/mmc/host.h>
 
@@ -43,7 +44,7 @@
 #define IGEP3_GPIO_WIFI_NRESET	139
 #define IGEP3_GPIO_BT_NRESET	137
 
-#define IGEP3_GPIO_USBH_NRESET  115
+#define IGEP3_GPIO_USBH_NRESET  183
 
 
 #if defined(CONFIG_MTD_ONENAND_OMAP2) || \
@@ -103,7 +104,7 @@
 	},
 };
 
-void __init igep3_flash_init(void)
+static void __init igep3_flash_init(void)
 {
 	u8 cs = 0;
 	u8 onenandcs = GPMC_CS_NUM + 1;
@@ -137,12 +138,11 @@
 }
 
 #else
-void __init igep3_flash_init(void) {}
+static void __init igep3_flash_init(void) {}
 #endif
 
-static struct regulator_consumer_supply igep3_vmmc1_supply = {
-	.supply		= "vmmc",
-};
+static struct regulator_consumer_supply igep3_vmmc1_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
 
 /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
 static struct regulator_init_data igep3_vmmc1 = {
@@ -159,6 +159,52 @@
 	.consumer_supplies      = &igep3_vmmc1_supply,
 };
 
+static struct regulator_consumer_supply igep3_vio_supply =
+	REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep3_vio = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV		= 1,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &igep3_vio_supply,
+};
+
+static struct regulator_consumer_supply igep3_vmmc2_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep3_vmmc2 = {
+	.constraints	= {
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+		.always_on		= 1,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &igep3_vmmc2_supply,
+};
+
+static struct fixed_voltage_config igep3_vwlan = {
+	.supply_name		= "vwlan",
+	.microvolts		= 3300000,
+	.gpio			= -EINVAL,
+	.enabled_at_boot	= 1,
+	.init_data		= &igep3_vmmc2,
+};
+
+static struct platform_device igep3_vwlan_device = {
+	.name	= "reg-fixed-voltage",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &igep3_vwlan,
+	},
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	[0] = {
 		.mmc		= 1,
@@ -254,12 +300,6 @@
 	mmc[0].gpio_cd = gpio + 0;
 	omap2_hsmmc_init(mmc);
 
-	/*
-	 * link regulators to MMC adapters ... we "know" the
-	 * regulators will be set up only *after* we return.
-	 */
-	igep3_vmmc1_supply.dev = mmc[0].dev;
-
 	/* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
 #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
 	if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
@@ -287,6 +327,10 @@
 	.usb_mode	= T2_USB_MODE_ULPI,
 };
 
+static struct platform_device *igep3_devices[] __initdata = {
+	&igep3_vwlan_device,
+};
+
 static void __init igep3_init_irq(void)
 {
 	omap2_init_common_infrastructure();
@@ -303,6 +347,7 @@
 	.usb		= &igep3_twl4030_usb_data,
 	.gpio		= &igep3_twl4030_gpio_pdata,
 	.vmmc1		= &igep3_vmmc1,
+	.vio		= &igep3_vio,
 };
 
 static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = {
@@ -363,8 +408,20 @@
 void __init igep3_wifi_bt_init(void) {}
 #endif
 
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+	.port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+	.phy_reset = true,
+	.reset_gpio_port[0] = -EINVAL,
+	.reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET,
+	.reset_gpio_port[2] = -EINVAL,
+};
+
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+	OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #endif
@@ -375,9 +432,10 @@
 
 	/* Register I2C busses and drivers */
 	igep3_i2c_init();
-
+	platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices));
 	omap_serial_init();
 	usb_musb_init(&musb_board_data);
+	usb_ehci_init(&ehci_pdata);
 
 	igep3_flash_init();
 	igep3_leds_init();
@@ -392,6 +450,7 @@
 
 MACHINE_START(IGEP0030, "IGEP OMAP3 module")
 	.boot_params	= 0x80000100,
+	.reserve	= omap_reserve,
 	.map_io		= omap3_map_io,
 	.init_irq	= igep3_init_irq,
 	.init_machine	= igep3_init,
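
The vwlan additions model an always-on rail with the "reg-fixed-voltage" driver: fixed_voltage_config names the supply and its microvolts, .gpio = -EINVAL means there is no enable pin, and .init_data points at an ordinary regulator_init_data naming the consumers (igep3_vmmc2 above). In outline, for a hypothetical 3.3 V rail:

    #include <linux/errno.h>
    #include <linux/platform_device.h>
    #include <linux/regulator/machine.h>
    #include <linux/regulator/fixed.h>

    static struct regulator_init_data foo_vdd_init_data = {
            .constraints = {
                    .valid_modes_mask       = REGULATOR_MODE_NORMAL,
                    .always_on              = 1,
            },
    };

    static struct fixed_voltage_config foo_vdd_config = {
            .supply_name            = "foo_vdd",
            .microvolts             = 3300000,      /* 3.3 V */
            .gpio                   = -EINVAL,      /* no enable GPIO */
            .enabled_at_boot        = 1,
            .init_data              = &foo_vdd_init_data,
    };

    static struct platform_device foo_vdd_device = {
            .name   = "reg-fixed-voltage",
            .id     = 0,    /* unique among fixed regulators */
            .dev    = {
                    .platform_data = &foo_vdd_config,
            },
    };
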
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index a4fe8e1..46d814a 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -207,7 +207,7 @@
 	.driver_name = "generic_dpi_panel",
 	.data = &dvi_panel,
 	.phy.dpi.data_lines = 24,
-	.reset_gpio = 170,
+	.reset_gpio = -EINVAL,
 };
 
 static struct omap_dss_device beagle_tv_device = {
@@ -279,6 +279,8 @@
 static int beagle_twl_gpio_setup(struct device *dev,
 		unsigned gpio, unsigned ngpio)
 {
+	int r;
+
 	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
 		mmc[0].gpio_wp = -EINVAL;
 	} else if ((omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C1_3) ||
@@ -299,17 +301,63 @@
 	/* REVISIT: need ehci-omap hooks for external VBUS
 	 * power switch and overcurrent detect
 	 */
+	if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM) {
+		r = gpio_request(gpio + 1, "EHCI_nOC");
+		if (!r) {
+			r = gpio_direction_input(gpio + 1);
+			if (r)
+				gpio_free(gpio + 1);
+		}
+		if (r)
+			pr_err("%s: unable to configure EHCI_nOC\n", __func__);
+	}
 
-	gpio_request(gpio + 1, "EHCI_nOC");
-	gpio_direction_input(gpio + 1);
-
-	/* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
+	/*
+	 * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
+	 * high / others active low)
+	 */
 	gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
-	gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
+		gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
+	else
+		gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+
+	/* DVI reset GPIO is different between beagle revisions */
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
+		beagle_dvi_device.reset_gpio = 129;
+	else
+		beagle_dvi_device.reset_gpio = 170;
 
 	/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
 	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
+	/*
+	 * gpio + 1 on XM controls the TFP410's enable line (active low)
+	 * gpio + 2's function varies depending on the board rev as follows:
+	 * P7/P8 revisions(prototype): Camera EN
+	 * A2+ revisions (production): LDO (supplies DVI, serial, led blocks)
+	 */
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
+		r = gpio_request(gpio + 1, "nDVI_PWR_EN");
+		if (!r) {
+			r = gpio_direction_output(gpio + 1, 0);
+			if (r)
+				gpio_free(gpio + 1);
+		}
+		if (r)
+			pr_err("%s: unable to configure nDVI_PWR_EN\n",
+				__func__);
+		r = gpio_request(gpio + 2, "DVI_LDO_EN");
+		if (!r) {
+			r = gpio_direction_output(gpio + 2, 1);
+			if (r)
+				gpio_free(gpio + 2);
+		}
+		if (r)
+			pr_err("%s: unable to configure DVI_LDO_EN\n",
+				__func__);
+	}
+
 	return 0;
 }
 
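The beagle hunks use a request -> configure -> free-on-error sequence so a half-configured GPIO is not left claimed. As a reusable helper (hypothetical; gpiolib's gpio_request_one() covers a similar case):

    #include <linux/gpio.h>

    static int foo_gpio_output(unsigned gpio, const char *label, int value)
    {
            int r = gpio_request(gpio, label);

            if (r)
                    return r;
            r = gpio_direction_output(gpio, value);
            if (r)
                    gpio_free(gpio);        /* don't keep a broken claim */
            return r;
    }
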
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 3094e20..e001a04 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/leds.h>
 #include <linux/gpio.h>
@@ -95,7 +96,16 @@
 static void __init omap4_ehci_init(void)
 {
 	int ret;
+	struct clk *phy_ref_clk;
 
+	/* FREF_CLK3 provides the 19.2 MHz reference clock to the PHY */
+	phy_ref_clk = clk_get(NULL, "auxclk3_ck");
+	if (IS_ERR(phy_ref_clk)) {
+		pr_err("Cannot request auxclk3\n");
+		goto error1;
+	}
+	clk_set_rate(phy_ref_clk, 19200000);
+	clk_enable(phy_ref_clk);
 
 	/* disable the power to the usb hub prior to init */
 	ret = gpio_request(GPIO_HUB_POWER, "hub_power");
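
The Panda change sources the EHCI PHY's 19.2 MHz reference from auxclk3. clk_get() reports failure through ERR_PTR(), so the result must be tested with IS_ERR() before the rate is programmed and the clock gated on. The sequence in isolation, assuming the same clock name:

    #include <linux/kernel.h>
    #include <linux/clk.h>
    #include <linux/err.h>

    static struct clk *foo_enable_phy_refclk(void)
    {
            struct clk *clk = clk_get(NULL, "auxclk3_ck");

            if (IS_ERR(clk))
                    return clk;                     /* propagate the ERR_PTR */
            if (clk_set_rate(clk, 19200000))        /* 19.2 MHz */
                    pr_err("could not set PHY refclk rate\n");
            clk_enable(clk);
            return clk;
    }
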
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 14d95af..e0e040f 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -192,7 +192,7 @@
 	},
 };
 
-struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
+static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
 	.irq = OMAP_GPIO_IRQ(OMAP_ZOOM_WLAN_IRQ_GPIO),
 	/* ZOOM ref clock is 26 MHz */
 	.board_ref_clock = 1,
@@ -286,7 +286,7 @@
 }
 
 /* EXTMUTE callback function */
-void zoom2_set_hs_extmute(int mute)
+static void zoom2_set_hs_extmute(int mute)
 {
 	gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
 }
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index d3ab1c9e..403a4a1 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3286,7 +3286,7 @@
 	CLK(NULL,	"cpefuse_fck",	&cpefuse_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"ts_fck",	&ts_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
-	CLK("ehci-omap.0",	"usbtll_fck",	&usbtll_fck,	CK_3430ES2 | CK_AM35XX),
+	CLK("ehci-omap.0",	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK("omap-mcbsp.1",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK("omap-mcbsp.5",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK(NULL,	"core_96m_fck",	&core_96m_fck,	CK_3XXX),
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index de3faa2..9b459c2 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -103,9 +103,7 @@
 		const char *name;
 		struct powerdomain *ptr;
 	} pwrdm;
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
 	const u16 clktrctrl_mask;
-#endif
 	const u8 flags;
 	const u8 dep_bit;
 	const u8 prcm_partition;
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 381f4eb..2c9c912 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -978,7 +978,7 @@
 arch_initcall(omap2_init_devices);
 
 #if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
-struct omap_device_pm_latency omap_wdt_latency[] = {
+static struct omap_device_pm_latency omap_wdt_latency[] = {
 	[0] = {
 		.deactivate_func = omap_device_idle_hwmods,
 		.activate_func   = omap_device_enable_hwmods,
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 85bf8ca..23049c4 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -100,13 +100,14 @@
 }
 
 /* XXX: FIQ and additional INTC support (only MPU at the moment) */
-static void omap_ack_irq(unsigned int irq)
+static void omap_ack_irq(struct irq_data *d)
 {
 	intc_bank_write_reg(0x1, &irq_banks[0], INTC_CONTROL);
 }
 
-static void omap_mask_irq(unsigned int irq)
+static void omap_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	int offset = irq & (~(IRQ_BITS_PER_REG - 1));
 
 	if (cpu_is_omap34xx()) {
@@ -128,8 +129,9 @@
 	intc_bank_write_reg(1 << irq, &irq_banks[0], INTC_MIR_SET0 + offset);
 }
 
-static void omap_unmask_irq(unsigned int irq)
+static void omap_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	int offset = irq & (~(IRQ_BITS_PER_REG - 1));
 
 	irq &= (IRQ_BITS_PER_REG - 1);
@@ -137,17 +139,17 @@
 	intc_bank_write_reg(1 << irq, &irq_banks[0], INTC_MIR_CLEAR0 + offset);
 }
 
-static void omap_mask_ack_irq(unsigned int irq)
+static void omap_mask_ack_irq(struct irq_data *d)
 {
-	omap_mask_irq(irq);
-	omap_ack_irq(irq);
+	omap_mask_irq(d);
+	omap_ack_irq(d);
 }
 
 static struct irq_chip omap_irq_chip = {
-	.name	= "INTC",
-	.ack	= omap_mask_ack_irq,
-	.mask	= omap_mask_irq,
-	.unmask	= omap_unmask_irq,
+	.name		= "INTC",
+	.irq_ack	= omap_mask_ack_irq,
+	.irq_mask	= omap_mask_irq,
+	.irq_unmask	= omap_unmask_irq,
 };
 
 static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 17bd639..df8d2f2 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -893,7 +893,7 @@
 		return NULL;
 
 	m = &entry->mux;
-	memcpy(m, src, sizeof(struct omap_mux_entry));
+	entry->mux = *src;
 
 #ifdef CONFIG_OMAP_MUX
 	if (omap_mux_copy_names(src, m)) {
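
The one-line mux.c change is a bounds fix, not a style tweak: m points at the struct omap_mux embedded at the head of a struct omap_mux_entry, but the old memcpy() copied sizeof(struct omap_mux_entry) bytes through it, running past the field into the entry's list bookkeeping and over-reading src, which is only a struct omap_mux. Structure assignment copies exactly sizeof(struct omap_mux) and is type-checked. In miniature (layout modeled on mux.h):

    #include <linux/list.h>
    #include <linux/string.h>

    struct foo {                            /* stand-in for struct omap_mux */
            int reg_offset;
    };

    struct foo_entry {                      /* stand-in for omap_mux_entry */
            struct foo              mux;    /* embedded copy target */
            struct list_head        node;   /* trailing bookkeeping */
    };

    static void foo_copy(struct foo_entry *entry, const struct foo *src)
    {
            /*
             * Wrong: writes sizeof(struct foo_entry) bytes into entry->mux,
             * clobbering entry->node and reading past the end of *src:
             *
             *      memcpy(&entry->mux, src, sizeof(struct foo_entry));
             */
            entry->mux = *src;      /* copies exactly sizeof(struct foo) */
    }
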
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 440c98e..17f80e4 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -703,7 +703,7 @@
  * Signals different on CBC package compared to the superset
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBC)
-struct omap_mux __initdata omap3_cbc_subset[] = {
+static struct omap_mux __initdata omap3_cbc_subset[] = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #else
@@ -721,7 +721,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)	\
 		&& defined(CONFIG_OMAP_PACKAGE_CBC)
-struct omap_ball __initdata omap3_cbc_ball[] = {
+static struct omap_ball __initdata omap3_cbc_ball[] = {
 	_OMAP3_BALLENTRY(CAM_D0, "ae16", NULL),
 	_OMAP3_BALLENTRY(CAM_D1, "ae15", NULL),
 	_OMAP3_BALLENTRY(CAM_D10, "d25", NULL),
diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c
index 980f11d..c322e7b 100644
--- a/arch/arm/mach-omap2/mux44xx.c
+++ b/arch/arm/mach-omap2/mux44xx.c
@@ -544,7 +544,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
 		&& defined(CONFIG_OMAP_PACKAGE_CBL)
-struct omap_ball __initdata omap4_core_cbl_ball[] = {
+static struct omap_ball __initdata omap4_core_cbl_ball[] = {
 	_OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL),
@@ -1262,7 +1262,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
 		&& defined(CONFIG_OMAP_PACKAGE_CBS)
-struct omap_ball __initdata omap4_core_cbs_ball[] = {
+static struct omap_ball __initdata omap4_core_cbs_ball[] = {
 	_OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL),
@@ -1546,7 +1546,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
 		&& defined(CONFIG_OMAP_PACKAGE_CBL)
-struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
+static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
 	_OMAP4_BALLENTRY(SIM_IO, "h4", NULL),
 	_OMAP4_BALLENTRY(SIM_CLK, "j2", NULL),
 	_OMAP4_BALLENTRY(SIM_RESET, "g2", NULL),
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
index 15f8c6c..00e1d2b 100644
--- a/arch/arm/mach-omap2/omap_twl.c
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -20,6 +20,8 @@
 
 #include <plat/voltage.h>
 
+#include "pm.h"
+
 #define OMAP3_SRI2C_SLAVE_ADDR		0x12
 #define OMAP3_VDD_MPU_SR_CONTROL_REG	0x00
 #define OMAP3_VDD_CORE_SR_CONTROL_REG	0x01
@@ -60,17 +62,17 @@
 
 #define REG_SMPS_OFFSET         0xE0
 
-unsigned long twl4030_vsel_to_uv(const u8 vsel)
+static unsigned long twl4030_vsel_to_uv(const u8 vsel)
 {
 	return (((vsel * 125) + 6000)) * 100;
 }
 
-u8 twl4030_uv_to_vsel(unsigned long uv)
+static u8 twl4030_uv_to_vsel(unsigned long uv)
 {
 	return DIV_ROUND_UP(uv - 600000, 12500);
 }
 
-unsigned long twl6030_vsel_to_uv(const u8 vsel)
+static unsigned long twl6030_vsel_to_uv(const u8 vsel)
 {
 	/*
 	 * In TWL6030 depending on the value of SMPS_OFFSET
@@ -102,7 +104,7 @@
 		return ((((vsel - 1) * 125) + 6000)) * 100;
 }
 
-u8 twl6030_uv_to_vsel(unsigned long uv)
+static u8 twl6030_uv_to_vsel(unsigned long uv)
 {
 	/*
 	 * In TWL6030 depending on the value of SMPS_OFFSET
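
The TWL4030 helpers made static above encode a 600 mV base with 12.5 mV steps: uV = (vsel * 125 + 6000) * 100 = vsel * 12500 + 600000. For example, vsel = 32 gives 32 * 12500 + 600000 = 1,000,000 uV (1.0 V), and the inverse, DIV_ROUND_UP(1000000 - 600000, 12500), returns 32; because the inverse rounds up, a requested voltage is never undershot.
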
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c
index 784989f..5acd2ab 100644
--- a/arch/arm/mach-omap2/pm_bus.c
+++ b/arch/arm/mach-omap2/pm_bus.c
@@ -20,7 +20,7 @@
 #include <plat/omap-pm.h>
 
 #ifdef CONFIG_PM_RUNTIME
-int omap_pm_runtime_suspend(struct device *dev)
+static int omap_pm_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	int r, ret = 0;
@@ -37,7 +37,7 @@
 	return ret;
 };
 
-int omap_pm_runtime_resume(struct device *dev)
+static int omap_pm_runtime_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	int r;
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
index 53d44f6..49654c8 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -228,7 +228,67 @@
 
 
 #ifndef __ASSEMBLER__
-
+/*
+ * Stub omap2xxx/omap3xxx functions so that common files
+ * continue to build when custom builds are used
+ */
+#if defined(CONFIG_ARCH_OMAP4) && !(defined(CONFIG_ARCH_OMAP2) ||	\
+					defined(CONFIG_ARCH_OMAP3))
+static inline u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+static inline void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+}
+static inline u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits,
+		s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not suppose to be used on omap4\n");
+	return 0;
+}
+#else
 /* Power/reset management domain register get/set */
 extern u32 omap2_prm_read_mod_reg(s16 module, u16 idx);
 extern void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx);
@@ -242,6 +302,7 @@
 extern int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift);
 extern int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift);
 
+#endif	/* CONFIG_ARCH_OMAP4 */
 #endif
 
 /*
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index 786d685..b1e0af1 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -27,6 +27,7 @@
 #include <plat/voltage.h>
 
 #include "control.h"
+#include "pm.h"
 
 static bool sr_enable_on_init;
 
diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
index b0c4907..4067669 100644
--- a/arch/arm/mach-omap2/wd_timer.c
+++ b/arch/arm/mach-omap2/wd_timer.c
@@ -13,6 +13,8 @@
 
 #include <plat/omap_hwmod.h>
 
+#include "wd_timer.h"
+
 /*
  * In order to avoid any assumptions from the bootloader regarding WDT
  * settings, the WDT module is reset during init. This enables the watchdog
diff --git a/arch/arm/mach-pnx4008/irq.c b/arch/arm/mach-pnx4008/irq.c
index a9ce02b..c69c180 100644
--- a/arch/arm/mach-pnx4008/irq.c
+++ b/arch/arm/mach-pnx4008/irq.c
@@ -36,44 +36,44 @@
 
 static u8 pnx4008_irq_type[NR_IRQS] = PNX4008_IRQ_TYPES;
 
-static void pnx4008_mask_irq(unsigned int irq)
+static void pnx4008_mask_irq(struct irq_data *d)
 {
-	__raw_writel(__raw_readl(INTC_ER(irq)) & ~INTC_BIT(irq), INTC_ER(irq));	/* mask interrupt */
+	__raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq));	/* mask interrupt */
 }
 
-static void pnx4008_unmask_irq(unsigned int irq)
+static void pnx4008_unmask_irq(struct irq_data *d)
 {
-	__raw_writel(__raw_readl(INTC_ER(irq)) | INTC_BIT(irq), INTC_ER(irq));	/* unmask interrupt */
+	__raw_writel(__raw_readl(INTC_ER(d->irq)) | INTC_BIT(d->irq), INTC_ER(d->irq));	/* unmask interrupt */
 }
 
-static void pnx4008_mask_ack_irq(unsigned int irq)
+static void pnx4008_mask_ack_irq(struct irq_data *d)
 {
-	__raw_writel(__raw_readl(INTC_ER(irq)) & ~INTC_BIT(irq), INTC_ER(irq));	/* mask interrupt */
-	__raw_writel(INTC_BIT(irq), INTC_SR(irq));	/* clear interrupt status */
+	__raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq));	/* mask interrupt */
+	__raw_writel(INTC_BIT(d->irq), INTC_SR(d->irq));	/* clear interrupt status */
 }
 
-static int pnx4008_set_irq_type(unsigned int irq, unsigned int type)
+static int pnx4008_set_irq_type(struct irq_data *d, unsigned int type)
 {
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) | INTC_BIT(irq), INTC_ATR(irq));	/*edge sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) | INTC_BIT(irq), INTC_APR(irq));	/*rising edge */
-		set_irq_handler(irq, handle_edge_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq));	/*edge sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq));	/*rising edge */
+		set_irq_handler(d->irq, handle_edge_irq);
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) | INTC_BIT(irq), INTC_ATR(irq));	/*edge sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) & ~INTC_BIT(irq), INTC_APR(irq));	/*falling edge */
-		set_irq_handler(irq, handle_edge_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq));	/*edge sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq));	/*falling edge */
+		set_irq_handler(d->irq, handle_edge_irq);
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) & ~INTC_BIT(irq), INTC_ATR(irq));	/*level sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) & ~INTC_BIT(irq), INTC_APR(irq));	/*low level */
-		set_irq_handler(irq, handle_level_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq));	/*level sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq));	/*low level */
+		set_irq_handler(d->irq, handle_level_irq);
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) & ~INTC_BIT(irq), INTC_ATR(irq));	/*level sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) | INTC_BIT(irq), INTC_APR(irq));	/* high level */
-		set_irq_handler(irq, handle_level_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq));	/*level sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq));	/* high level */
+		set_irq_handler(d->irq, handle_level_irq);
 		break;
 
 	/* IRQ_TYPE_EDGE_BOTH is not supported */
@@ -85,10 +85,10 @@
 }
 
 static struct irq_chip pnx4008_irq_chip = {
-	.ack = pnx4008_mask_ack_irq,
-	.mask = pnx4008_mask_irq,
-	.unmask = pnx4008_unmask_irq,
-	.set_type = pnx4008_set_irq_type,
+	.irq_ack = pnx4008_mask_ack_irq,
+	.irq_mask = pnx4008_mask_irq,
+	.irq_unmask = pnx4008_unmask_irq,
+	.irq_set_type = pnx4008_set_irq_type,
 };
 
 void __init pnx4008_init_irq(void)
@@ -99,14 +99,18 @@
 	for (i = 0; i < NR_IRQS; i++) {
 		set_irq_flags(i, IRQF_VALID);
 		set_irq_chip(i, &pnx4008_irq_chip);
-		pnx4008_set_irq_type(i, pnx4008_irq_type[i]);
+		pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]);
 	}
 
 	/* configure and enable IRQ 0,1,30,31 (cascade interrupts) */
-	pnx4008_set_irq_type(SUB1_IRQ_N, pnx4008_irq_type[SUB1_IRQ_N]);
-	pnx4008_set_irq_type(SUB2_IRQ_N, pnx4008_irq_type[SUB2_IRQ_N]);
-	pnx4008_set_irq_type(SUB1_FIQ_N, pnx4008_irq_type[SUB1_FIQ_N]);
-	pnx4008_set_irq_type(SUB2_FIQ_N, pnx4008_irq_type[SUB2_FIQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB1_IRQ_N),
+			     pnx4008_irq_type[SUB1_IRQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB2_IRQ_N),
+			     pnx4008_irq_type[SUB2_IRQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB1_FIQ_N),
+			     pnx4008_irq_type[SUB1_FIQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB2_FIQ_N),
+			     pnx4008_irq_type[SUB2_FIQ_N]);
 
 	/* mask all others */
 	__raw_writel((1 << SUB2_FIQ_N) | (1 << SUB1_FIQ_N) |
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index ccb2d0c..a134a14 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -477,25 +477,25 @@
 /******************************************************************************
  * FPGA IRQ
  ******************************************************************************/
-static void balloon3_mask_irq(unsigned int irq)
+static void balloon3_mask_irq(struct irq_data *d)
 {
-	int balloon3_irq = (irq - BALLOON3_IRQ(0));
+	int balloon3_irq = (d->irq - BALLOON3_IRQ(0));
 	balloon3_irq_enabled &= ~(1 << balloon3_irq);
 	__raw_writel(~balloon3_irq_enabled, BALLOON3_INT_CONTROL_REG);
 }
 
-static void balloon3_unmask_irq(unsigned int irq)
+static void balloon3_unmask_irq(struct irq_data *d)
 {
-	int balloon3_irq = (irq - BALLOON3_IRQ(0));
+	int balloon3_irq = (d->irq - BALLOON3_IRQ(0));
 	balloon3_irq_enabled |= (1 << balloon3_irq);
 	__raw_writel(~balloon3_irq_enabled, BALLOON3_INT_CONTROL_REG);
 }
 
 static struct irq_chip balloon3_irq_chip = {
 	.name		= "FPGA",
-	.ack		= balloon3_mask_irq,
-	.mask		= balloon3_mask_irq,
-	.unmask		= balloon3_unmask_irq,
+	.irq_ack	= balloon3_mask_irq,
+	.irq_mask	= balloon3_mask_irq,
+	.irq_unmask	= balloon3_unmask_irq,
 };
 
 static void balloon3_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -504,8 +504,13 @@
 					balloon3_irq_enabled;
 	do {
 		/* clear useless edge notification */
-		if (desc->chip->ack)
-			desc->chip->ack(BALLOON3_AUX_NIRQ);
+		if (desc->irq_data.chip->irq_ack) {
+			struct irq_data *d;
+
+			d = irq_get_irq_data(BALLOON3_AUX_NIRQ);
+			desc->irq_data.chip->irq_ack(d);
+		}
+
 		while (pending) {
 			irq = BALLOON3_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/clock-pxa3xx.c b/arch/arm/mach-pxa/clock-pxa3xx.c
index 1b08a34..3f864cd 100644
--- a/arch/arm/mach-pxa/clock-pxa3xx.c
+++ b/arch/arm/mach-pxa/clock-pxa3xx.c
@@ -115,7 +115,6 @@
 {
 	unsigned long acsr = ACSR;
 	unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
-	unsigned int smcfs = (acsr >> 23) & 0x7;
 
 	return BASE_CLK * smcfs_mult[(acsr >> 23) & 0x7] /
 			df_clkdiv[(memclkcfg >> 16) & 0x3];
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c
index 0f31305..a2380cd 100644
--- a/arch/arm/mach-pxa/cm-x2xx-pci.c
+++ b/arch/arm/mach-pxa/cm-x2xx-pci.c
@@ -59,7 +59,7 @@
 static void cmx2xx_it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
 	/* clear our parent irq */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	it8152_irq_demux(irq, desc);
 }
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index d6e15f7..f5d91ef 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -22,7 +22,6 @@
 
 #include <mach/hardware.h>
 #include <asm/system.h>
-#include <asm/pgtable.h>
 #include <asm/mach/map.h>
 #include <asm/mach-types.h>
 
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 6205dc9..a079d8b 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -9,11 +9,13 @@
  * published by the Free Software Foundation.
  */
 
+struct irq_data;
 struct sys_timer;
 
 extern struct sys_timer pxa_timer;
 extern void __init pxa_init_irq(int irq_nr,
-				int (*set_wake)(unsigned int, unsigned int));
+				int (*set_wake)(struct irq_data *,
+						unsigned int));
 extern void __init pxa25x_init_irq(void);
 #ifdef CONFIG_CPU_PXA26x
 extern void __init pxa26x_init_irq(void);
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 54e91c9..2693e3c 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -53,37 +53,48 @@
 	return !cpu_is_pxa25x();
 }
 
-static void pxa_mask_irq(unsigned int irq)
+static inline void __iomem *irq_base(int i)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	static unsigned long phys_base[] = {
+		0x40d00000,
+		0x40d0009c,
+		0x40d00130,
+	};
+
+	return (void __iomem *)io_p2v(phys_base[i]);
+}
+
+static void pxa_mask_irq(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr &= ~(1 << IRQ_BIT(irq));
+	icmr &= ~(1 << IRQ_BIT(d->irq));
 	__raw_writel(icmr, base + ICMR);
 }
 
-static void pxa_unmask_irq(unsigned int irq)
+static void pxa_unmask_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	void __iomem *base = irq_data_get_irq_chip_data(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr |= 1 << IRQ_BIT(irq);
+	icmr |= 1 << IRQ_BIT(d->irq);
 	__raw_writel(icmr, base + ICMR);
 }
 
 static struct irq_chip pxa_internal_irq_chip = {
 	.name		= "SC",
-	.ack		= pxa_mask_irq,
-	.mask		= pxa_mask_irq,
-	.unmask		= pxa_unmask_irq,
+	.irq_ack	= pxa_mask_irq,
+	.irq_mask	= pxa_mask_irq,
+	.irq_unmask	= pxa_unmask_irq,
 };
 
 /*
  * GPIO IRQs for GPIO 0 and 1
  */
-static int pxa_set_low_gpio_type(unsigned int irq, unsigned int type)
+static int pxa_set_low_gpio_type(struct irq_data *d, unsigned int type)
 {
-	int gpio = irq - IRQ_GPIO0;
+	int gpio = d->irq - IRQ_GPIO0;
 
 	if (__gpio_is_occupied(gpio)) {
 		pr_err("%s failed: GPIO is configured\n", __func__);
@@ -103,31 +114,17 @@
 	return 0;
 }
 
-static void pxa_ack_low_gpio(unsigned int irq)
+static void pxa_ack_low_gpio(struct irq_data *d)
 {
-	GEDR0 = (1 << (irq - IRQ_GPIO0));
-}
-
-static void pxa_mask_low_gpio(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	desc->chip->mask(irq);
-}
-
-static void pxa_unmask_low_gpio(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	desc->chip->unmask(irq);
+	GEDR0 = (1 << (d->irq - IRQ_GPIO0));
 }
 
 static struct irq_chip pxa_low_gpio_chip = {
 	.name		= "GPIO-l",
-	.ack		= pxa_ack_low_gpio,
-	.mask		= pxa_mask_low_gpio,
-	.unmask		= pxa_unmask_low_gpio,
-	.set_type	= pxa_set_low_gpio_type,
+	.irq_ack	= pxa_ack_low_gpio,
+	.irq_mask	= pxa_mask_irq,
+	.irq_unmask	= pxa_unmask_irq,
+	.irq_set_type	= pxa_set_low_gpio_type,
 };
 
 static void __init pxa_init_low_gpio_irq(set_wake_t fn)
@@ -141,22 +138,12 @@
 
 	for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) {
 		set_irq_chip(irq, &pxa_low_gpio_chip);
+		set_irq_chip_data(irq, irq_base(0));
 		set_irq_handler(irq, handle_edge_irq);
 		set_irq_flags(irq, IRQF_VALID);
 	}
 
-	pxa_low_gpio_chip.set_wake = fn;
-}
-
-static inline void __iomem *irq_base(int i)
-{
-	static unsigned long phys_base[] = {
-		0x40d00000,
-		0x40d0009c,
-		0x40d00130,
-	};
-
-	return (void __iomem *)io_p2v(phys_base[i >> 5]);
+	pxa_low_gpio_chip.irq_set_wake = fn;
 }
 
 void __init pxa_init_irq(int irq_nr, set_wake_t fn)
@@ -168,7 +155,7 @@
 	pxa_internal_irq_nr = irq_nr;
 
 	for (n = 0; n < irq_nr; n += 32) {
-		void __iomem *base = irq_base(n);
+		void __iomem *base = irq_base(n >> 5);
 
 		__raw_writel(0, base + ICMR);	/* disable all IRQs */
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
@@ -188,7 +175,7 @@
 	/* only unmasked interrupts kick us out of idle */
 	__raw_writel(1, irq_base(0) + ICCR);
 
-	pxa_internal_irq_chip.set_wake = fn;
+	pxa_internal_irq_chip.irq_set_wake = fn;
 	pxa_init_low_gpio_irq(fn);
 }
 
@@ -200,7 +187,7 @@
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr; i += 32) {
+	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
 		void __iomem *base = irq_base(i);
 
 		saved_icmr[i] = __raw_readl(base + ICMR);
@@ -219,14 +206,14 @@
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr; i += 32) {
+	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
 		void __iomem *base = irq_base(i);
 
 		__raw_writel(saved_icmr[i], base + ICMR);
 		__raw_writel(0, base + ICLR);
 	}
 
-	if (!cpu_is_pxa25x())
+	if (cpu_has_ipr())
 		for (i = 0; i < pxa_internal_irq_nr; i++)
 			__raw_writel(saved_ipr[i], IRQ_BASE + IPR(i));
 
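The pxa/irq.c rework changes irq_base()'s contract: it now takes a bank index (one bank per 32 interrupts) instead of an irq number, so external callers shift first (irq_base(n >> 5)) and the suspend/resume loops iterate banks directly. That also fixes the old loops, which stepped i through irq numbers (0, 32, 64) and then used those same sparse values to index saved_icmr[]. The corrected shape, assuming irq_base() and ICMR as in the patch:

    static u32 saved_icmr[3];       /* one slot per 32-irq bank */

    static void foo_suspend_irqs(int irq_nr)
    {
            int bank;

            for (bank = 0; bank < irq_nr / 32; bank++) {
                    void __iomem *base = irq_base(bank);  /* bank index, not irq */

                    saved_icmr[bank] = __raw_readl(base + ICMR);
                    __raw_writel(0, base + ICMR);
            }
    }
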
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 8ab62a6..c9a3e77 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -95,9 +95,9 @@
 
 static unsigned int lpd270_irq_enabled;
 
-static void lpd270_mask_irq(unsigned int irq)
+static void lpd270_mask_irq(struct irq_data *d)
 {
-	int lpd270_irq = irq - LPD270_IRQ(0);
+	int lpd270_irq = d->irq - LPD270_IRQ(0);
 
 	__raw_writew(~(1 << lpd270_irq), LPD270_INT_STATUS);
 
@@ -105,9 +105,9 @@
 	__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
 }
 
-static void lpd270_unmask_irq(unsigned int irq)
+static void lpd270_unmask_irq(struct irq_data *d)
 {
-	int lpd270_irq = irq - LPD270_IRQ(0);
+	int lpd270_irq = d->irq - LPD270_IRQ(0);
 
 	lpd270_irq_enabled |= 1 << lpd270_irq;
 	__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
@@ -115,9 +115,9 @@
 
 static struct irq_chip lpd270_irq_chip = {
 	.name		= "CPLD",
-	.ack		= lpd270_mask_irq,
-	.mask		= lpd270_mask_irq,
-	.unmask		= lpd270_unmask_irq,
+	.irq_ack	= lpd270_mask_irq,
+	.irq_mask	= lpd270_mask_irq,
+	.irq_unmask	= lpd270_unmask_irq,
 };
 
 static void lpd270_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -126,7 +126,8 @@
 
 	pending = __raw_readw(LPD270_INT_STATUS) & lpd270_irq_enabled;
 	do {
-		desc->chip->ack(irq);	/* clear useless edge notification */
+		/* clear useless edge notification */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = LPD270_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 3072dbe..dca20de 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -122,15 +122,15 @@
 
 static unsigned long lubbock_irq_enabled;
 
-static void lubbock_mask_irq(unsigned int irq)
+static void lubbock_mask_irq(struct irq_data *d)
 {
-	int lubbock_irq = (irq - LUBBOCK_IRQ(0));
+	int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
 	LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
 }
 
-static void lubbock_unmask_irq(unsigned int irq)
+static void lubbock_unmask_irq(struct irq_data *d)
 {
-	int lubbock_irq = (irq - LUBBOCK_IRQ(0));
+	int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
 	/* the irq can be acknowledged only if deasserted, so it's done here */
 	LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
 	LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
@@ -138,16 +138,17 @@
 
 static struct irq_chip lubbock_irq_chip = {
 	.name		= "FPGA",
-	.ack		= lubbock_mask_irq,
-	.mask		= lubbock_mask_irq,
-	.unmask		= lubbock_unmask_irq,
+	.irq_ack	= lubbock_mask_irq,
+	.irq_mask	= lubbock_mask_irq,
+	.irq_unmask	= lubbock_unmask_irq,
 };
 
 static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
 	do {
-		desc->chip->ack(irq);	/* clear our parent irq */
+		/* clear our parent irq */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = LUBBOCK_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 740c035..d4b6f23 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -123,15 +123,15 @@
 
 static unsigned long mainstone_irq_enabled;
 
-static void mainstone_mask_irq(unsigned int irq)
+static void mainstone_mask_irq(struct irq_data *d)
 {
-	int mainstone_irq = (irq - MAINSTONE_IRQ(0));
+	int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
 	MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
 }
 
-static void mainstone_unmask_irq(unsigned int irq)
+static void mainstone_unmask_irq(struct irq_data *d)
 {
-	int mainstone_irq = (irq - MAINSTONE_IRQ(0));
+	int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
 	/* the irq can be acknowledged only if deasserted, so it's done here */
 	MST_INTSETCLR &= ~(1 << mainstone_irq);
 	MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
@@ -139,16 +139,17 @@
 
 static struct irq_chip mainstone_irq_chip = {
 	.name		= "FPGA",
-	.ack		= mainstone_mask_irq,
-	.mask		= mainstone_mask_irq,
-	.unmask		= mainstone_unmask_irq,
+	.irq_ack	= mainstone_mask_irq,
+	.irq_mask	= mainstone_mask_irq,
+	.irq_unmask	= mainstone_unmask_irq,
 };
 
 static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
 	do {
-		desc->chip->ack(irq);	/* clear useless edge notification */
+		/* clear useless edge notification */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = MAINSTONE_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index f33647a..90820fa 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -241,23 +241,23 @@
 
 static unsigned long pcm990_irq_enabled;
 
-static void pcm990_mask_ack_irq(unsigned int irq)
+static void pcm990_mask_ack_irq(struct irq_data *d)
 {
-	int pcm990_irq = (irq - PCM027_IRQ(0));
+	int pcm990_irq = (d->irq - PCM027_IRQ(0));
 	PCM990_INTMSKENA = (pcm990_irq_enabled &= ~(1 << pcm990_irq));
 }
 
-static void pcm990_unmask_irq(unsigned int irq)
+static void pcm990_unmask_irq(struct irq_data *d)
 {
-	int pcm990_irq = (irq - PCM027_IRQ(0));
+	int pcm990_irq = (d->irq - PCM027_IRQ(0));
 	/* the irq can be acknowledged only if deasserted, so it's done here */
 	PCM990_INTSETCLR |= 1 << pcm990_irq;
 	PCM990_INTMSKENA  = (pcm990_irq_enabled |= (1 << pcm990_irq));
 }
 
 static struct irq_chip pcm990_irq_chip = {
-	.mask_ack	= pcm990_mask_ack_irq,
-	.unmask		= pcm990_unmask_irq,
+	.irq_mask_ack	= pcm990_mask_ack_irq,
+	.irq_unmask	= pcm990_unmask_irq,
 };
 
 static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -265,7 +265,8 @@
 	unsigned long pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled;
 
 	do {
-		desc->chip->ack(irq);	/* clear our parent IRQ */
+		/* clear our parent IRQ */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = PCM027_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 3f5241c..fbc5b77 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
 #include <linux/sysdev.h>
+#include <linux/irq.h>
 
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
@@ -282,15 +283,15 @@
 /* PXA25x: supports wakeup from GPIO0..GPIO15 and RTC alarm
  */
 
-static int pxa25x_set_wake(unsigned int irq, unsigned int on)
+static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(irq);
+	int gpio = IRQ_TO_GPIO(d->irq);
 	uint32_t mask = 0;
 
 	if (gpio >= 0 && gpio < 85)
 		return gpio_set_wake(gpio, on);
 
-	if (irq == IRQ_RTCAlrm) {
+	if (d->irq == IRQ_RTCAlrm) {
 		mask = PWER_RTC;
 		goto set_pwer;
 	}
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index b2130b7..987301f 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/sysdev.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
@@ -343,18 +344,18 @@
 /* PXA27x:  Various gpios can issue wakeup events.  This logic only
  * handles the simple cases, not the WEMUX2 and WEMUX3 options
  */
-static int pxa27x_set_wake(unsigned int irq, unsigned int on)
+static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(irq);
+	int gpio = IRQ_TO_GPIO(d->irq);
 	uint32_t mask;
 
 	if (gpio >= 0 && gpio < 128)
 		return gpio_set_wake(gpio, on);
 
-	if (irq == IRQ_KEYPAD)
+	if (d->irq == IRQ_KEYPAD)
 		return keypad_set_wake(on);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_RTCAlrm:
 		mask = PWER_RTC;
 		break;
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index e14818f..a7a19e1 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -229,11 +229,11 @@
 	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
 }
 
-static int pxa3xx_set_wake(unsigned int irq, unsigned int on)
+static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned long flags, mask = 0;
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_SSP3:
 		mask = ADXER_MFP_WSSP3;
 		break;
@@ -322,40 +322,40 @@
 #define pxa3xx_set_wake	NULL
 #endif
 
-static void pxa_ack_ext_wakeup(unsigned int irq)
+static void pxa_ack_ext_wakeup(struct irq_data *d)
 {
-	PECR |= PECR_IS(irq - IRQ_WAKEUP0);
+	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
 }
 
-static void pxa_mask_ext_wakeup(unsigned int irq)
+static void pxa_mask_ext_wakeup(struct irq_data *d)
 {
-	ICMR2 &= ~(1 << ((irq - PXA_IRQ(0)) & 0x1f));
-	PECR &= ~PECR_IE(irq - IRQ_WAKEUP0);
+	ICMR2 &= ~(1 << ((d->irq - PXA_IRQ(0)) & 0x1f));
+	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
 }
 
-static void pxa_unmask_ext_wakeup(unsigned int irq)
+static void pxa_unmask_ext_wakeup(struct irq_data *d)
 {
-	ICMR2 |= 1 << ((irq - PXA_IRQ(0)) & 0x1f);
-	PECR |= PECR_IE(irq - IRQ_WAKEUP0);
+	ICMR2 |= 1 << ((d->irq - PXA_IRQ(0)) & 0x1f);
+	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
 }
 
-static int pxa_set_ext_wakeup_type(unsigned int irq, unsigned int flow_type)
+static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
 {
 	if (flow_type & IRQ_TYPE_EDGE_RISING)
-		PWER |= 1 << (irq - IRQ_WAKEUP0);
+		PWER |= 1 << (d->irq - IRQ_WAKEUP0);
 
 	if (flow_type & IRQ_TYPE_EDGE_FALLING)
-		PWER |= 1 << (irq - IRQ_WAKEUP0 + 2);
+		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);
 
 	return 0;
 }
 
 static struct irq_chip pxa_ext_wakeup_chip = {
 	.name		= "WAKEUP",
-	.ack		= pxa_ack_ext_wakeup,
-	.mask		= pxa_mask_ext_wakeup,
-	.unmask		= pxa_unmask_ext_wakeup,
-	.set_type	= pxa_set_ext_wakeup_type,
+	.irq_ack	= pxa_ack_ext_wakeup,
+	.irq_mask	= pxa_mask_ext_wakeup,
+	.irq_unmask	= pxa_unmask_ext_wakeup,
+	.irq_set_type	= pxa_set_ext_wakeup_type,
 };
 
 static void __init pxa_init_ext_wakeup_irq(set_wake_t fn)
@@ -368,7 +368,7 @@
 		set_irq_flags(irq, IRQF_VALID);
 	}
 
-	pxa_ext_wakeup_chip.set_wake = fn;
+	pxa_ext_wakeup_chip.irq_set_wake = fn;
 }
 
 void __init pxa3xx_init_irq(void)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 0bc9387..b49a2c2 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -25,6 +25,7 @@
 #include <linux/spi/corgi_lcd.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
+#include <linux/mtd/physmap.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/regulator/machine.h>
 #include <linux/io.h>
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index de69b20..49eeeab 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -249,9 +249,9 @@
 	return viper_isa_irqs[bit] + PXA_ISA_IRQ(0);
 }
 
-static void viper_ack_irq(unsigned int irq)
+static void viper_ack_irq(struct irq_data *d)
 {
-	int viper_irq = viper_irq_to_bitmask(irq);
+	int viper_irq = viper_irq_to_bitmask(d->irq);
 
 	if (viper_irq & 0xff)
 		VIPER_LO_IRQ_STATUS = viper_irq;
@@ -259,14 +259,14 @@
 		VIPER_HI_IRQ_STATUS = (viper_irq >> 8);
 }
 
-static void viper_mask_irq(unsigned int irq)
+static void viper_mask_irq(struct irq_data *d)
 {
-	viper_irq_enabled_mask &= ~(viper_irq_to_bitmask(irq));
+	viper_irq_enabled_mask &= ~(viper_irq_to_bitmask(d->irq));
 }
 
-static void viper_unmask_irq(unsigned int irq)
+static void viper_unmask_irq(struct irq_data *d)
 {
-	viper_irq_enabled_mask |= viper_irq_to_bitmask(irq);
+	viper_irq_enabled_mask |= viper_irq_to_bitmask(d->irq);
 }
 
 static inline unsigned long viper_irq_pending(void)
@@ -283,7 +283,7 @@
 	do {
 		/* we're in a chained irq handler,
 		 * so ack the interrupt by hand */
-		desc->chip->ack(irq);
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 		if (likely(pending)) {
 			irq = viper_bit_to_irq(__ffs(pending));
@@ -294,10 +294,10 @@
 }
 
 static struct irq_chip viper_irq_chip = {
-	.name	= "ISA",
-	.ack	= viper_ack_irq,
-	.mask	= viper_mask_irq,
-	.unmask	= viper_unmask_irq
+	.name		= "ISA",
+	.irq_ack	= viper_ack_irq,
+	.irq_mask	= viper_mask_irq,
+	.irq_unmask	= viper_unmask_irq
 };
 
 static void __init viper_init_irq(void)
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index bf034c7..f4b053b 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -83,19 +83,19 @@
 	return zeus_isa_irqs[bit] + PXA_ISA_IRQ(0);
 }
 
-static void zeus_ack_irq(unsigned int irq)
+static void zeus_ack_irq(struct irq_data *d)
 {
-	__raw_writew(zeus_irq_to_bitmask(irq), ZEUS_CPLD_ISA_IRQ);
+	__raw_writew(zeus_irq_to_bitmask(d->irq), ZEUS_CPLD_ISA_IRQ);
 }
 
-static void zeus_mask_irq(unsigned int irq)
+static void zeus_mask_irq(struct irq_data *d)
 {
-	zeus_irq_enabled_mask &= ~(zeus_irq_to_bitmask(irq));
+	zeus_irq_enabled_mask &= ~(zeus_irq_to_bitmask(d->irq));
 }
 
-static void zeus_unmask_irq(unsigned int irq)
+static void zeus_unmask_irq(struct irq_data *d)
 {
-	zeus_irq_enabled_mask |= zeus_irq_to_bitmask(irq);
+	zeus_irq_enabled_mask |= zeus_irq_to_bitmask(d->irq);
 }
 
 static inline unsigned long zeus_irq_pending(void)
@@ -111,7 +111,7 @@
 	do {
 		/* we're in a chained irq handler,
 		 * so ack the interrupt by hand */
-		desc->chip->ack(gpio_to_irq(ZEUS_ISA_GPIO));
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 		if (likely(pending)) {
 			irq = zeus_bit_to_irq(__ffs(pending));
@@ -122,10 +122,10 @@
 }
 
 static struct irq_chip zeus_irq_chip = {
-	.name	= "ISA",
-	.ack	= zeus_ack_irq,
-	.mask	= zeus_mask_irq,
-	.unmask	= zeus_unmask_irq,
+	.name		= "ISA",
+	.irq_ack	= zeus_ack_irq,
+	.irq_mask	= zeus_mask_irq,
+	.irq_unmask	= zeus_unmask_irq,
 };
 
 static void __init zeus_init_irq(void)
@@ -830,8 +830,8 @@
 	pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f));
 
 	/* Fix timings for dm9000s (CS1/CS2)*/
-	msc0 = __raw_readl(MSC0) & 0x0000ffff | (dm9000_msc << 16);
-	msc1 = __raw_readl(MSC1) & 0xffff0000 | dm9000_msc;
+	msc0 = (__raw_readl(MSC0) & 0x0000ffff) | (dm9000_msc << 16);
+	msc1 = (__raw_readl(MSC1) & 0xffff0000) | dm9000_msc;
 	__raw_writel(msc0, MSC0);
 	__raw_writel(msc1, MSC1);
 
diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c
index 9dd15d6..d29cd9b 100644
--- a/arch/arm/mach-rpc/irq.c
+++ b/arch/arm/mach-rpc/irq.c
@@ -6,110 +6,110 @@
 #include <asm/hardware/iomd.h>
 #include <asm/irq.h>
 
-static void iomd_ack_irq_a(unsigned int irq)
+static void iomd_ack_irq_a(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << irq;
+	mask = 1 << d->irq;
 	val = iomd_readb(IOMD_IRQMASKA);
 	iomd_writeb(val & ~mask, IOMD_IRQMASKA);
 	iomd_writeb(mask, IOMD_IRQCLRA);
 }
 
-static void iomd_mask_irq_a(unsigned int irq)
+static void iomd_mask_irq_a(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << irq;
+	mask = 1 << d->irq;
 	val = iomd_readb(IOMD_IRQMASKA);
 	iomd_writeb(val & ~mask, IOMD_IRQMASKA);
 }
 
-static void iomd_unmask_irq_a(unsigned int irq)
+static void iomd_unmask_irq_a(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << irq;
+	mask = 1 << d->irq;
 	val = iomd_readb(IOMD_IRQMASKA);
 	iomd_writeb(val | mask, IOMD_IRQMASKA);
 }
 
 static struct irq_chip iomd_a_chip = {
-	.ack	= iomd_ack_irq_a,
-	.mask	= iomd_mask_irq_a,
-	.unmask = iomd_unmask_irq_a,
+	.irq_ack	= iomd_ack_irq_a,
+	.irq_mask	= iomd_mask_irq_a,
+	.irq_unmask	= iomd_unmask_irq_a,
 };
 
-static void iomd_mask_irq_b(unsigned int irq)
+static void iomd_mask_irq_b(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_IRQMASKB);
 	iomd_writeb(val & ~mask, IOMD_IRQMASKB);
 }
 
-static void iomd_unmask_irq_b(unsigned int irq)
+static void iomd_unmask_irq_b(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_IRQMASKB);
 	iomd_writeb(val | mask, IOMD_IRQMASKB);
 }
 
 static struct irq_chip iomd_b_chip = {
-	.ack	= iomd_mask_irq_b,
-	.mask	= iomd_mask_irq_b,
-	.unmask = iomd_unmask_irq_b,
+	.irq_ack	= iomd_mask_irq_b,
+	.irq_mask	= iomd_mask_irq_b,
+	.irq_unmask	= iomd_unmask_irq_b,
 };
 
-static void iomd_mask_irq_dma(unsigned int irq)
+static void iomd_mask_irq_dma(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_DMAMASK);
 	iomd_writeb(val & ~mask, IOMD_DMAMASK);
 }
 
-static void iomd_unmask_irq_dma(unsigned int irq)
+static void iomd_unmask_irq_dma(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_DMAMASK);
 	iomd_writeb(val | mask, IOMD_DMAMASK);
 }
 
 static struct irq_chip iomd_dma_chip = {
-	.ack	= iomd_mask_irq_dma,
-	.mask	= iomd_mask_irq_dma,
-	.unmask = iomd_unmask_irq_dma,
+	.irq_ack	= iomd_mask_irq_dma,
+	.irq_mask	= iomd_mask_irq_dma,
+	.irq_unmask	= iomd_unmask_irq_dma,
 };
 
-static void iomd_mask_irq_fiq(unsigned int irq)
+static void iomd_mask_irq_fiq(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_FIQMASK);
 	iomd_writeb(val & ~mask, IOMD_FIQMASK);
 }
 
-static void iomd_unmask_irq_fiq(unsigned int irq)
+static void iomd_unmask_irq_fiq(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_FIQMASK);
 	iomd_writeb(val | mask, IOMD_FIQMASK);
 }
 
 static struct irq_chip iomd_fiq_chip = {
-	.ack	= iomd_mask_irq_fiq,
-	.mask	= iomd_mask_irq_fiq,
-	.unmask = iomd_unmask_irq_fiq,
+	.irq_ack	= iomd_mask_irq_fiq,
+	.irq_mask	= iomd_mask_irq_fiq,
+	.irq_unmask	= iomd_unmask_irq_fiq,
 };
 
 void __init rpc_init_irq(void)
diff --git a/arch/arm/mach-s3c2410/bast-irq.c b/arch/arm/mach-s3c2410/bast-irq.c
index 217b102..606cb6b 100644
--- a/arch/arm/mach-s3c2410/bast-irq.c
+++ b/arch/arm/mach-s3c2410/bast-irq.c
@@ -75,38 +75,38 @@
 static unsigned char bast_pc104_irqs[] = { 3, 5, 7, 10 };
 
 static void
-bast_pc104_mask(unsigned int irqno)
+bast_pc104_mask(struct irq_data *data)
 {
 	unsigned long temp;
 
 	temp = __raw_readb(BAST_VA_PC104_IRQMASK);
-	temp &= ~bast_pc104_irqmasks[irqno];
+	temp &= ~bast_pc104_irqmasks[data->irq];
 	__raw_writeb(temp, BAST_VA_PC104_IRQMASK);
 }
 
 static void
-bast_pc104_maskack(unsigned int irqno)
+bast_pc104_maskack(struct irq_data *data)
 {
 	struct irq_desc *desc = irq_desc + IRQ_ISA;
 
-	bast_pc104_mask(irqno);
-	desc->chip->ack(IRQ_ISA);
+	bast_pc104_mask(data);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static void
-bast_pc104_unmask(unsigned int irqno)
+bast_pc104_unmask(struct irq_data *data)
 {
 	unsigned long temp;
 
 	temp = __raw_readb(BAST_VA_PC104_IRQMASK);
-	temp |= bast_pc104_irqmasks[irqno];
+	temp |= bast_pc104_irqmasks[data->irq];
 	__raw_writeb(temp, BAST_VA_PC104_IRQMASK);
 }
 
 static struct irq_chip  bast_pc104_chip = {
-	.mask	     = bast_pc104_mask,
-	.unmask	     = bast_pc104_unmask,
-	.ack	     = bast_pc104_maskack
+	.irq_mask	= bast_pc104_mask,
+	.irq_unmask	= bast_pc104_unmask,
+	.irq_ack	= bast_pc104_maskack
 };
 
 static void
@@ -123,7 +123,7 @@
 		/* ack if we get an irq with nothing (ie, startup) */
 
 		desc = irq_desc + IRQ_ISA;
-		desc->chip->ack(IRQ_ISA);
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 	} else {
 		/* handle the IRQ */
 
diff --git a/arch/arm/mach-s3c2410/include/mach/irqs.h b/arch/arm/mach-s3c2410/include/mach/irqs.h
index 11bb0f0..e5a68ea 100644
--- a/arch/arm/mach-s3c2410/include/mach/irqs.h
+++ b/arch/arm/mach-s3c2410/include/mach/irqs.h
@@ -152,8 +152,8 @@
 
 #define IRQ_S3C2416_HSMMC0	S3C2410_IRQ(21)		/* S3C2416/S3C2450 */
 
-#define IRQ_HSMMC0		IRQ_S3C2443_HSMMC
-#define IRQ_HSMMC1		IRQ_S3C2416_HSMMC0
+#define IRQ_HSMMC0		IRQ_S3C2416_HSMMC0
+#define IRQ_HSMMC1		IRQ_S3C2443_HSMMC
 
 #define IRQ_S3C2443_LCD1	S3C2410_IRQSUB(14)
 #define IRQ_S3C2443_LCD2	S3C2410_IRQSUB(15)
diff --git a/arch/arm/mach-s3c2410/include/mach/map.h b/arch/arm/mach-s3c2410/include/mach/map.h
index cd3983a..25bbf5a 100644
--- a/arch/arm/mach-s3c2410/include/mach/map.h
+++ b/arch/arm/mach-s3c2410/include/mach/map.h
@@ -112,8 +112,8 @@
 #define S3C_PA_IIC          S3C2410_PA_IIC
 #define S3C_PA_UART	    S3C24XX_PA_UART
 #define S3C_PA_USBHOST	S3C2410_PA_USBHOST
-#define S3C_PA_HSMMC0	    S3C2443_PA_HSMMC
-#define S3C_PA_HSMMC1	    S3C2416_PA_HSMMC0
+#define S3C_PA_HSMMC0	    S3C2416_PA_HSMMC0
+#define S3C_PA_HSMMC1	    S3C2443_PA_HSMMC
 #define S3C_PA_WDT	    S3C2410_PA_WATCHDOG
 #define S3C_PA_NAND	    S3C24XX_PA_NAND
 
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
index 101aeea..44494a5 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
@@ -86,6 +86,7 @@
 #define S3C2443_HCLKCON_LCDC		(1<<9)
 #define S3C2443_HCLKCON_USBH		(1<<11)
 #define S3C2443_HCLKCON_USBD		(1<<12)
+#define S3C2416_HCLKCON_HSMMC0		(1<<15)
 #define S3C2443_HCLKCON_HSMMC		(1<<16)
 #define S3C2443_HCLKCON_CFC		(1<<17)
 #define S3C2443_HCLKCON_SSMC		(1<<18)
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c
index 6000ca9..eddb52b 100644
--- a/arch/arm/mach-s3c2412/irq.c
+++ b/arch/arm/mach-s3c2412/irq.c
@@ -49,9 +49,9 @@
 */
 
 static void
-s3c2412_irq_mask(unsigned int irqno)
+s3c2412_irq_mask(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2410_INTMSK);
@@ -62,9 +62,9 @@
 }
 
 static inline void
-s3c2412_irq_ack(unsigned int irqno)
+s3c2412_irq_ack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 
 	__raw_writel(bitval, S3C2412_EINTPEND);
 	__raw_writel(bitval, S3C2410_SRCPND);
@@ -72,9 +72,9 @@
 }
 
 static inline void
-s3c2412_irq_maskack(unsigned int irqno)
+s3c2412_irq_maskack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2410_INTMSK);
@@ -89,9 +89,9 @@
 }
 
 static void
-s3c2412_irq_unmask(unsigned int irqno)
+s3c2412_irq_unmask(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2412_EINTMASK);
@@ -102,11 +102,11 @@
 }
 
 static struct irq_chip s3c2412_irq_eint0t4 = {
-	.ack	   = s3c2412_irq_ack,
-	.mask	   = s3c2412_irq_mask,
-	.unmask	   = s3c2412_irq_unmask,
-	.set_wake  = s3c_irq_wake,
-	.set_type  = s3c_irqext_type,
+	.irq_ack	= s3c2412_irq_ack,
+	.irq_mask	= s3c2412_irq_mask,
+	.irq_unmask	= s3c2412_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake,
+	.irq_set_type	= s3c_irqext_type,
 };
 
 #define INTBIT(x)	(1 << ((x) - S3C2410_IRQSUB(0)))
@@ -132,29 +132,29 @@
 #define INTMSK_CFSDI	(1UL << (IRQ_S3C2412_CFSDI - IRQ_EINT0))
 #define SUBMSK_CFSDI	INTMSK_SUB(IRQ_S3C2412_SDI, IRQ_S3C2412_CF)
 
-static void s3c2412_irq_cfsdi_mask(unsigned int irqno)
+static void s3c2412_irq_cfsdi_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_CFSDI, SUBMSK_CFSDI);
+	s3c_irqsub_mask(data->irq, INTMSK_CFSDI, SUBMSK_CFSDI);
 }
 
-static void s3c2412_irq_cfsdi_unmask(unsigned int irqno)
+static void s3c2412_irq_cfsdi_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_CFSDI);
+	s3c_irqsub_unmask(data->irq, INTMSK_CFSDI);
 }
 
-static void s3c2412_irq_cfsdi_ack(unsigned int irqno)
+static void s3c2412_irq_cfsdi_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_CFSDI, SUBMSK_CFSDI);
+	s3c_irqsub_maskack(data->irq, INTMSK_CFSDI, SUBMSK_CFSDI);
 }
 
 static struct irq_chip s3c2412_irq_cfsdi = {
 	.name		= "s3c2412-cfsdi",
-	.ack		= s3c2412_irq_cfsdi_ack,
-	.mask		= s3c2412_irq_cfsdi_mask,
-	.unmask		= s3c2412_irq_cfsdi_unmask,
+	.irq_ack	= s3c2412_irq_cfsdi_ack,
+	.irq_mask	= s3c2412_irq_cfsdi_mask,
+	.irq_unmask	= s3c2412_irq_cfsdi_unmask,
 };
 
-static int s3c2412_irq_rtc_wake(unsigned int irqno, unsigned int state)
+static int s3c2412_irq_rtc_wake(struct irq_data *data, unsigned int state)
 {
 	unsigned long pwrcfg;
 
@@ -165,7 +165,7 @@
 		pwrcfg |= S3C2412_PWRCFG_RTC_MASKIRQ;
 	__raw_writel(pwrcfg, S3C2412_PWRCFG);
 
-	return s3c_irq_chip.set_wake(irqno, state);
+	return s3c_irq_chip.irq_set_wake(data, state);
 }
 
 static struct irq_chip s3c2412_irq_rtc_chip;
@@ -193,7 +193,7 @@
 	/* change RTC IRQ's set wake method */
 
 	s3c2412_irq_rtc_chip = s3c_irq_chip;
-	s3c2412_irq_rtc_chip.set_wake = s3c2412_irq_rtc_wake;
+	s3c2412_irq_rtc_chip.irq_set_wake = s3c2412_irq_rtc_wake;
 
 	set_irq_chip(IRQ_RTC, &s3c2412_irq_rtc_chip);
 
diff --git a/arch/arm/mach-s3c2416/Kconfig b/arch/arm/mach-s3c2416/Kconfig
index df8d149..69b48a7 100644
--- a/arch/arm/mach-s3c2416/Kconfig
+++ b/arch/arm/mach-s3c2416/Kconfig
@@ -31,6 +31,17 @@
 	help
 	  Internal config node to apply S3C2416 power management
 
+config S3C2416_SETUP_SDHCI
+	bool
+	select S3C2416_SETUP_SDHCI_GPIO
+	help
+	  Internal helper functions for S3C2416-based SDHCI systems
+
+config S3C2416_SETUP_SDHCI_GPIO
+	bool
+	help
+	  Common setup code for SDHCI GPIO.
+
 menu "S3C2416 Machines"
 
 config MACH_SMDK2416
@@ -42,6 +53,7 @@
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_NAND
 	select S3C_DEV_USB_HOST
+	select S3C2416_SETUP_SDHCI
 	select S3C2416_PM if PM
 	help
 	  Say Y here if you are using an SMDK2416
diff --git a/arch/arm/mach-s3c2416/Makefile b/arch/arm/mach-s3c2416/Makefile
index ef038d6..7b805b2 100644
--- a/arch/arm/mach-s3c2416/Makefile
+++ b/arch/arm/mach-s3c2416/Makefile
@@ -14,6 +14,10 @@
 obj-$(CONFIG_S3C2416_PM)	+= pm.o
 #obj-$(CONFIG_S3C2416_DMA)	+= dma.o
 
+# Device setup
+obj-$(CONFIG_S3C2416_SETUP_SDHCI) += setup-sdhci.o
+obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+
 # Machine support
 
 obj-$(CONFIG_MACH_SMDK2416)	+= mach-smdk2416.o
diff --git a/arch/arm/mach-s3c2416/clock.c b/arch/arm/mach-s3c2416/clock.c
index 7ccf5a2..3b02d85 100644
--- a/arch/arm/mach-s3c2416/clock.c
+++ b/arch/arm/mach-s3c2416/clock.c
@@ -38,12 +38,11 @@
 	[7] = 8,
 };
 
-/* ID to hardware numbering, 0 is HSMMC1, 1 is HSMMC0 */
 static struct clksrc_clk hsmmc_div[] = {
 	[0] = {
 		.clk = {
 			.name	= "hsmmc-div",
-			.id	= 1,
+			.id	= 0,
 			.parent	= &clk_esysclk.clk,
 		},
 		.reg_div = { .reg = S3C2416_CLKDIV2, .size = 2, .shift = 6 },
@@ -51,7 +50,7 @@
 	[1] = {
 		.clk = {
 			.name	= "hsmmc-div",
-			.id	= 0,
+			.id	= 1,
 			.parent	= &clk_esysclk.clk,
 		},
 		.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
@@ -61,7 +60,7 @@
 static struct clksrc_clk hsmmc_mux[] = {
 	[0] = {
 		.clk	= {
-			.id	= 1,
+			.id	= 0,
 			.name	= "hsmmc-if",
 			.ctrlbit = (1 << 6),
 			.enable = s3c2443_clkcon_enable_s,
@@ -77,7 +76,7 @@
 	},
 	[1] = {
 		.clk	= {
-			.id	= 0,
+			.id	= 1,
 			.name	= "hsmmc-if",
 			.ctrlbit = (1 << 12),
 			.enable = s3c2443_clkcon_enable_s,
@@ -93,6 +92,13 @@
 	},
 };
 
+static struct clk hsmmc0_clk = {
+	.name		= "hsmmc",
+	.id		= 0,
+	.parent		= &clk_h,
+	.enable		= s3c2443_clkcon_enable_h,
+	.ctrlbit	= S3C2416_HCLKCON_HSMMC0,
+};
 
 static inline unsigned int s3c2416_fclk_div(unsigned long clkcon0)
 {
@@ -130,6 +136,8 @@
 	for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
 		s3c_register_clksrc(clksrcs[ptr], 1);
 
+	s3c24xx_register_clock(&hsmmc0_clk);
+
 	s3c_pwmclk_init();
 
 }
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 00174da..680fe38 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -77,28 +77,27 @@
 #define INTMSK_WDTAC97	(1UL << (IRQ_WDT - IRQ_EINT0))
 #define SUBMSK_WDTAC97	INTMSK(IRQ_S3C2443_WDT, IRQ_S3C2443_AC97)
 
-static void s3c2416_irq_wdtac97_mask(unsigned int irqno)
+static void s3c2416_irq_wdtac97_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_mask(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
-static void s3c2416_irq_wdtac97_unmask(unsigned int irqno)
+static void s3c2416_irq_wdtac97_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_WDTAC97);
+	s3c_irqsub_unmask(data->irq, INTMSK_WDTAC97);
 }
 
-static void s3c2416_irq_wdtac97_ack(unsigned int irqno)
+static void s3c2416_irq_wdtac97_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_maskack(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
 static struct irq_chip s3c2416_irq_wdtac97 = {
-	.mask	    = s3c2416_irq_wdtac97_mask,
-	.unmask	    = s3c2416_irq_wdtac97_unmask,
-	.ack	    = s3c2416_irq_wdtac97_ack,
+	.irq_mask	= s3c2416_irq_wdtac97_mask,
+	.irq_unmask	= s3c2416_irq_wdtac97_unmask,
+	.irq_ack	= s3c2416_irq_wdtac97_ack,
 };
 
-
 /* LCD sub interrupts */
 
 static void s3c2416_irq_demux_lcd(unsigned int irq, struct irq_desc *desc)
@@ -109,28 +108,27 @@
 #define INTMSK_LCD	(1UL << (IRQ_LCD - IRQ_EINT0))
 #define SUBMSK_LCD	INTMSK(IRQ_S3C2443_LCD1, IRQ_S3C2443_LCD4)
 
-static void s3c2416_irq_lcd_mask(unsigned int irqno)
+static void s3c2416_irq_lcd_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_mask(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
-static void s3c2416_irq_lcd_unmask(unsigned int irqno)
+static void s3c2416_irq_lcd_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_LCD);
+	s3c_irqsub_unmask(data->irq, INTMSK_LCD);
 }
 
-static void s3c2416_irq_lcd_ack(unsigned int irqno)
+static void s3c2416_irq_lcd_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_maskack(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
 static struct irq_chip s3c2416_irq_lcd = {
-	.mask	    = s3c2416_irq_lcd_mask,
-	.unmask	    = s3c2416_irq_lcd_unmask,
-	.ack	    = s3c2416_irq_lcd_ack,
+	.irq_mask	= s3c2416_irq_lcd_mask,
+	.irq_unmask	= s3c2416_irq_lcd_unmask,
+	.irq_ack	= s3c2416_irq_lcd_ack,
 };
 
-
 /* DMA sub interrupts */
 
 static void s3c2416_irq_demux_dma(unsigned int irq, struct irq_desc *desc)
@@ -142,28 +140,27 @@
 #define SUBMSK_DMA	INTMSK(IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5)
 
 
-static void s3c2416_irq_dma_mask(unsigned int irqno)
+static void s3c2416_irq_dma_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_mask(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
-static void s3c2416_irq_dma_unmask(unsigned int irqno)
+static void s3c2416_irq_dma_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_DMA);
+	s3c_irqsub_unmask(data->irq, INTMSK_DMA);
 }
 
-static void s3c2416_irq_dma_ack(unsigned int irqno)
+static void s3c2416_irq_dma_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_maskack(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
 static struct irq_chip s3c2416_irq_dma = {
-	.mask	    = s3c2416_irq_dma_mask,
-	.unmask	    = s3c2416_irq_dma_unmask,
-	.ack	    = s3c2416_irq_dma_ack,
+	.irq_mask	= s3c2416_irq_dma_mask,
+	.irq_unmask	= s3c2416_irq_dma_unmask,
+	.irq_ack	= s3c2416_irq_dma_ack,
 };
 
-
 /* UART3 sub interrupts */
 
 static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
@@ -174,28 +171,27 @@
 #define INTMSK_UART3	(1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
 #define SUBMSK_UART3	(0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
-static void s3c2416_irq_uart3_mask(unsigned int irqno)
+static void s3c2416_irq_uart3_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_mask(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
-static void s3c2416_irq_uart3_unmask(unsigned int irqno)
+static void s3c2416_irq_uart3_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART3);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART3);
 }
 
-static void s3c2416_irq_uart3_ack(unsigned int irqno)
+static void s3c2416_irq_uart3_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
 static struct irq_chip s3c2416_irq_uart3 = {
-	.mask	    = s3c2416_irq_uart3_mask,
-	.unmask	    = s3c2416_irq_uart3_unmask,
-	.ack	    = s3c2416_irq_uart3_ack,
+	.irq_mask	= s3c2416_irq_uart3_mask,
+	.irq_unmask	= s3c2416_irq_uart3_unmask,
+	.irq_ack	= s3c2416_irq_uart3_ack,
 };
 
-
 /* IRQ initialisation code */
 
 static int __init s3c2416_add_sub(unsigned int base,
diff --git a/arch/arm/mach-s3c2416/mach-smdk2416.c b/arch/arm/mach-s3c2416/mach-smdk2416.c
index 7fc3664..3f83177 100644
--- a/arch/arm/mach-s3c2416/mach-smdk2416.c
+++ b/arch/arm/mach-s3c2416/mach-smdk2416.c
@@ -46,6 +46,7 @@
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/nand.h>
+#include <plat/sdhci.h>
 
 #include <plat/regs-fb-v4.h>
 #include <plat/fb.h>
@@ -110,6 +111,13 @@
 		.ucon	     = UCON,
 		.ulcon	     = ULCON | 0x50,
 		.ufcon	     = UFCON,
+	},
+	[3] = {
+		.hwport	     = 3,
+		.flags	     = 0,
+		.ucon	     = UCON,
+		.ulcon	     = ULCON,
+		.ufcon	     = UFCON,
 	}
 };
 
@@ -159,6 +167,18 @@
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
 };
 
+static struct s3c_sdhci_platdata smdk2416_hsmmc0_pdata __initdata = {
+	.max_width		= 4,
+	.cd_type		= S3C_SDHCI_CD_GPIO,
+	.ext_cd_gpio		= S3C2410_GPF(1),
+	.ext_cd_gpio_invert	= 1,
+};
+
+static struct s3c_sdhci_platdata smdk2416_hsmmc1_pdata __initdata = {
+	.max_width		= 4,
+	.cd_type		= S3C_SDHCI_CD_NONE,
+};
+
 static struct platform_device *smdk2416_devices[] __initdata = {
 	&s3c_device_fb,
 	&s3c_device_wdt,
@@ -180,6 +200,9 @@
 	s3c_i2c0_set_platdata(NULL);
 	s3c_fb_set_platdata(&smdk2416_fb_platdata);
 
+	s3c_sdhci0_set_platdata(&smdk2416_hsmmc0_pdata);
+	s3c_sdhci1_set_platdata(&smdk2416_hsmmc1_pdata);
+
 	gpio_request(S3C2410_GPB(4), "USBHost Power");
 	gpio_direction_output(S3C2410_GPB(4), 1);
 
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index 63f39cd..ba7fd87 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -53,6 +53,7 @@
 #include <plat/s3c2416.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
+#include <plat/sdhci.h>
 
 #include <plat/iic-core.h>
 #include <plat/fb-core.h>
@@ -115,6 +116,10 @@
 	s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_updown;
 	s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_updown;
 
+	/* initialize device information early */
+	s3c2416_default_sdhci0();
+	s3c2416_default_sdhci1();
+
 	iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
 }
 
diff --git a/arch/arm/mach-s3c2416/setup-sdhci-gpio.c b/arch/arm/mach-s3c2416/setup-sdhci-gpio.c
new file mode 100644
index 0000000..f65cb3e
--- /dev/null
+++ b/arch/arm/mach-s3c2416/setup-sdhci-gpio.c
@@ -0,0 +1,34 @@
+/* linux/arch/arm/mach-s3c2416/setup-sdhci-gpio.c
+ *
+ * Copyright 2010 Promwad Innovation Company
+ *	Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * S3C2416 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
+ *
+ * Based on mach-s3c64xx/setup-sdhci-gpio.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/regs-gpio.h>
+#include <plat/gpio-cfg.h>
+
+void s3c2416_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
+{
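+	/* assumption (mirroring the S3C64xx helper this file is based on):
+	 * GPE5/GPE6 carry CLK and CMD, followed by 'width' data lines, all
+	 * switched to special function 2 with pulls disabled */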
+	s3c_gpio_cfgrange_nopull(S3C2410_GPE(5), 2 + width, S3C_GPIO_SFN(2));
+}
+
+void s3c2416_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
+{
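+	/* assumption: GPL0.. are the 'width' data lines, while GPL8/GPL9
+	 * carry CLK and CMD */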
+	s3c_gpio_cfgrange_nopull(S3C2410_GPL(0), width, S3C_GPIO_SFN(2));
+	s3c_gpio_cfgrange_nopull(S3C2410_GPL(8), 2, S3C_GPIO_SFN(2));
+}
diff --git a/arch/arm/mach-s3c2416/setup-sdhci.c b/arch/arm/mach-s3c2416/setup-sdhci.c
new file mode 100644
index 0000000..ed34fad
--- /dev/null
+++ b/arch/arm/mach-s3c2416/setup-sdhci.c
@@ -0,0 +1,61 @@
+/* linux/arch/arm/mach-s3c2416/setup-sdhci.c
+ *
+ * Copyright 2010 Promwad Innovation Company
+ *	Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * S3C2416 - Helper functions for setting up SDHCI device(s) (HSMMC)
+ *
+ * Based on mach-s3c64xx/setup-sdhci.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <plat/regs-sdhci.h>
+#include <plat/sdhci.h>
+
+/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
+
+char *s3c2416_hsmmc_clksrcs[4] = {
+	[0] = "hsmmc",
+	[1] = "hsmmc",
+	[2] = "hsmmc-if",
+	/* [3] = "48m" - note: not successfully used yet */
+};
+
+void s3c2416_setup_sdhci_cfg_card(struct platform_device *dev,
+				  void __iomem *r,
+				  struct mmc_ios *ios,
+				  struct mmc_card *card)
+{
+	u32 ctrl2, ctrl3;
+
+	ctrl2 = __raw_readl(r + S3C_SDHCI_CONTROL2);
+	ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+	ctrl2 |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
+		  S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
+		  S3C_SDHCI_CTRL2_ENFBCLKRX |
+		  S3C_SDHCI_CTRL2_DFCNT_NONE |
+		  S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
+
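+	/* pick the feedback clock delay: enable all four FCSEL taps below
+	 * 25MHz, only the first two above (values carried over from the
+	 * S3C64xx setup-sdhci.c this file is based on) */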
+	if (ios->clock < 25 * 1000000)
+		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL3 |
+			 S3C_SDHCI_CTRL3_FCSEL2 |
+			 S3C_SDHCI_CTRL3_FCSEL1 |
+			 S3C_SDHCI_CTRL3_FCSEL0);
+	else
+		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
+
+	__raw_writel(ctrl2, r + S3C_SDHCI_CONTROL2);
+	__raw_writel(ctrl3, r + S3C_SDHCI_CONTROL3);
+}
diff --git a/arch/arm/mach-s3c2440/irq.c b/arch/arm/mach-s3c2440/irq.c
index 0c049b9..acad442 100644
--- a/arch/arm/mach-s3c2440/irq.c
+++ b/arch/arm/mach-s3c2440/irq.c
@@ -69,27 +69,27 @@
 #define INTMSK_WDT	 (1UL << (IRQ_WDT - IRQ_EINT0))
 
 static void
-s3c_irq_wdtac97_mask(unsigned int irqno)
+s3c_irq_wdtac97_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_WDT, 3<<13);
+	s3c_irqsub_mask(data->irq, INTMSK_WDT, 3 << 13);
 }
 
 static void
-s3c_irq_wdtac97_unmask(unsigned int irqno)
+s3c_irq_wdtac97_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_WDT);
+	s3c_irqsub_unmask(data->irq, INTMSK_WDT);
 }
 
 static void
-s3c_irq_wdtac97_ack(unsigned int irqno)
+s3c_irq_wdtac97_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_WDT, 3<<13);
+	s3c_irqsub_maskack(data->irq, INTMSK_WDT, 3 << 13);
 }
 
 static struct irq_chip s3c_irq_wdtac97 = {
-	.mask	    = s3c_irq_wdtac97_mask,
-	.unmask	    = s3c_irq_wdtac97_unmask,
-	.ack	    = s3c_irq_wdtac97_ack,
+	.irq_mask	= s3c_irq_wdtac97_mask,
+	.irq_unmask	= s3c_irq_wdtac97_unmask,
+	.irq_ack	= s3c_irq_wdtac97_ack,
 };
 
 static int s3c2440_irq_add(struct sys_device *sysdev)
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c
index a75c0c2..83daf4e 100644
--- a/arch/arm/mach-s3c2440/s3c244x-irq.c
+++ b/arch/arm/mach-s3c2440/s3c244x-irq.c
@@ -68,27 +68,27 @@
 #define INTMSK_CAM (1UL << (IRQ_CAM - IRQ_EINT0))
 
 static void
-s3c_irq_cam_mask(unsigned int irqno)
+s3c_irq_cam_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_CAM, 3<<11);
+	s3c_irqsub_mask(data->irq, INTMSK_CAM, 3 << 11);
 }
 
 static void
-s3c_irq_cam_unmask(unsigned int irqno)
+s3c_irq_cam_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_CAM);
+	s3c_irqsub_unmask(data->irq, INTMSK_CAM);
 }
 
 static void
-s3c_irq_cam_ack(unsigned int irqno)
+s3c_irq_cam_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_CAM, 3<<11);
+	s3c_irqsub_maskack(data->irq, INTMSK_CAM, 3 << 11);
 }
 
 static struct irq_chip s3c_irq_cam = {
-	.mask	    = s3c_irq_cam_mask,
-	.unmask	    = s3c_irq_cam_unmask,
-	.ack	    = s3c_irq_cam_ack,
+	.irq_mask	= s3c_irq_cam_mask,
+	.irq_unmask	= s3c_irq_cam_unmask,
+	.irq_ack	= s3c_irq_cam_ack,
 };
 
 static int s3c244x_irq_add(struct sys_device *sysdev)
diff --git a/arch/arm/mach-s3c2443/Kconfig b/arch/arm/mach-s3c2443/Kconfig
index 31babec..d8eb868 100644
--- a/arch/arm/mach-s3c2443/Kconfig
+++ b/arch/arm/mach-s3c2443/Kconfig
@@ -10,6 +10,7 @@
 	select CPU_LLSERIAL_S3C2440
 	select SAMSUNG_CLKSRC
 	select S3C2443_CLOCK
+	select S3C_GPIO_PULL_S3C2443
 	help
 	  Support for the S3C2443 SoC from the S3C24XX line
 
@@ -25,7 +26,7 @@
 	bool "SMDK2443"
 	select CPU_S3C2443
 	select MACH_SMDK
-	select S3C_DEV_HSMMC
+	select S3C_DEV_HSMMC1
 	help
 	  Say Y here if you are using an SMDK2443
 
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 0c3c0c8..f4ec6d5 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -196,7 +196,7 @@
 static struct clksrc_clk clk_hsmmc_div = {
 	.clk	= {
 		.name		= "hsmmc-div",
-		.id		= -1,
+		.id		= 1,
 		.parent		= &clk_esysclk.clk,
 	},
 	.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
@@ -231,7 +231,7 @@
 
 static struct clk clk_hsmmc = {
 	.name		= "hsmmc-if",
-	.id		= -1,
+	.id		= 1,
 	.parent		= &clk_hsmmc_div.clk,
 	.enable		= s3c2443_enable_hsmmc,
 	.ops		= &(struct clk_ops) {
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 8934247..c7820f9 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -75,28 +75,27 @@
 #define INTMSK_WDTAC97	(1UL << (IRQ_WDT - IRQ_EINT0))
 #define SUBMSK_WDTAC97	INTMSK(IRQ_S3C2443_WDT, IRQ_S3C2443_AC97)
 
-static void s3c2443_irq_wdtac97_mask(unsigned int irqno)
+static void s3c2443_irq_wdtac97_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_mask(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
-static void s3c2443_irq_wdtac97_unmask(unsigned int irqno)
+static void s3c2443_irq_wdtac97_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_WDTAC97);
+	s3c_irqsub_unmask(data->irq, INTMSK_WDTAC97);
 }
 
-static void s3c2443_irq_wdtac97_ack(unsigned int irqno)
+static void s3c2443_irq_wdtac97_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_maskack(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
 static struct irq_chip s3c2443_irq_wdtac97 = {
-	.mask	    = s3c2443_irq_wdtac97_mask,
-	.unmask	    = s3c2443_irq_wdtac97_unmask,
-	.ack	    = s3c2443_irq_wdtac97_ack,
+	.irq_mask	= s3c2443_irq_wdtac97_mask,
+	.irq_unmask	= s3c2443_irq_wdtac97_unmask,
+	.irq_ack	= s3c2443_irq_wdtac97_ack,
 };
 
-
 /* LCD sub interrupts */
 
 static void s3c2443_irq_demux_lcd(unsigned int irq, struct irq_desc *desc)
@@ -107,28 +106,27 @@
 #define INTMSK_LCD	(1UL << (IRQ_LCD - IRQ_EINT0))
 #define SUBMSK_LCD	INTMSK(IRQ_S3C2443_LCD1, IRQ_S3C2443_LCD4)
 
-static void s3c2443_irq_lcd_mask(unsigned int irqno)
+static void s3c2443_irq_lcd_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_mask(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
-static void s3c2443_irq_lcd_unmask(unsigned int irqno)
+static void s3c2443_irq_lcd_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_LCD);
+	s3c_irqsub_unmask(data->irq, INTMSK_LCD);
 }
 
-static void s3c2443_irq_lcd_ack(unsigned int irqno)
+static void s3c2443_irq_lcd_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_maskack(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
 static struct irq_chip s3c2443_irq_lcd = {
-	.mask	    = s3c2443_irq_lcd_mask,
-	.unmask	    = s3c2443_irq_lcd_unmask,
-	.ack	    = s3c2443_irq_lcd_ack,
+	.irq_mask	= s3c2443_irq_lcd_mask,
+	.irq_unmask	= s3c2443_irq_lcd_unmask,
+	.irq_ack	= s3c2443_irq_lcd_ack,
 };
 
-
 /* DMA sub interrupts */
 
 static void s3c2443_irq_demux_dma(unsigned int irq, struct irq_desc *desc)
@@ -139,29 +137,27 @@
 #define INTMSK_DMA	(1UL << (IRQ_S3C2443_DMA - IRQ_EINT0))
 #define SUBMSK_DMA	INTMSK(IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5)
 
-
-static void s3c2443_irq_dma_mask(unsigned int irqno)
+static void s3c2443_irq_dma_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_mask(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
-static void s3c2443_irq_dma_unmask(unsigned int irqno)
+static void s3c2443_irq_dma_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_DMA);
+	s3c_irqsub_unmask(data->irq, INTMSK_DMA);
 }
 
-static void s3c2443_irq_dma_ack(unsigned int irqno)
+static void s3c2443_irq_dma_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_maskack(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
 static struct irq_chip s3c2443_irq_dma = {
-	.mask	    = s3c2443_irq_dma_mask,
-	.unmask	    = s3c2443_irq_dma_unmask,
-	.ack	    = s3c2443_irq_dma_ack,
+	.irq_mask	= s3c2443_irq_dma_mask,
+	.irq_unmask	= s3c2443_irq_dma_unmask,
+	.irq_ack	= s3c2443_irq_dma_ack,
 };
 
-
 /* UART3 sub interrupts */
 
 static void s3c2443_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
@@ -172,28 +168,27 @@
 #define INTMSK_UART3	(1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
 #define SUBMSK_UART3	(0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
-static void s3c2443_irq_uart3_mask(unsigned int irqno)
+static void s3c2443_irq_uart3_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_mask(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
-static void s3c2443_irq_uart3_unmask(unsigned int irqno)
+static void s3c2443_irq_uart3_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART3);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART3);
 }
 
-static void s3c2443_irq_uart3_ack(unsigned int irqno)
+static void s3c2443_irq_uart3_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
 static struct irq_chip s3c2443_irq_uart3 = {
-	.mask	    = s3c2443_irq_uart3_mask,
-	.unmask	    = s3c2443_irq_uart3_unmask,
-	.ack	    = s3c2443_irq_uart3_ack,
+	.irq_mask	= s3c2443_irq_uart3_mask,
+	.irq_unmask	= s3c2443_irq_uart3_unmask,
+	.irq_ack	= s3c2443_irq_uart3_ack,
 };
 
-
 /* CAM sub interrupts */
 
 static void s3c2443_irq_demux_cam(unsigned int irq, struct irq_desc *desc)
@@ -204,25 +199,25 @@
 #define INTMSK_CAM	(1UL << (IRQ_CAM - IRQ_EINT0))
 #define SUBMSK_CAM	INTMSK(IRQ_S3C2440_CAM_C, IRQ_S3C2440_CAM_P)
 
-static void s3c2443_irq_cam_mask(unsigned int irqno)
+static void s3c2443_irq_cam_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_CAM, SUBMSK_CAM);
+	s3c_irqsub_mask(data->irq, INTMSK_CAM, SUBMSK_CAM);
 }
 
-static void s3c2443_irq_cam_unmask(unsigned int irqno)
+static void s3c2443_irq_cam_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_CAM);
+	s3c_irqsub_unmask(data->irq, INTMSK_CAM);
 }
 
-static void s3c2443_irq_cam_ack(unsigned int irqno)
+static void s3c2443_irq_cam_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_CAM, SUBMSK_CAM);
+	s3c_irqsub_maskack(data->irq, INTMSK_CAM, SUBMSK_CAM);
 }
 
 static struct irq_chip s3c2443_irq_cam = {
-	.mask	    = s3c2443_irq_cam_mask,
-	.unmask	    = s3c2443_irq_cam_unmask,
-	.ack	    = s3c2443_irq_cam_ack,
+	.irq_mask	= s3c2443_irq_cam_mask,
+	.irq_unmask	= s3c2443_irq_cam_unmask,
+	.irq_ack	= s3c2443_irq_cam_ack,
 };
 
 /* IRQ initialisation code */
diff --git a/arch/arm/mach-s3c2443/mach-smdk2443.c b/arch/arm/mach-s3c2443/mach-smdk2443.c
index 4337f0a..514275e 100644
--- a/arch/arm/mach-s3c2443/mach-smdk2443.c
+++ b/arch/arm/mach-s3c2443/mach-smdk2443.c
@@ -99,13 +99,20 @@
 		.ucon	     = 0x3c5,
 		.ulcon	     = 0x43,
 		.ufcon	     = 0x51,
+	},
+	[3] = {
+		.hwport	     = 3,
+		.flags	     = 0,
+		.ucon	     = 0x3c5,
+		.ulcon	     = 0x03,
+		.ufcon	     = 0x51,
 	}
 };
 
 static struct platform_device *smdk2443_devices[] __initdata = {
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
-	&s3c_device_hsmmc0,
+	&s3c_device_hsmmc1,
 #ifdef CONFIG_SND_SOC_SMDK2443_WM9710
 	&s3c_device_ac97,
 #endif
diff --git a/arch/arm/mach-s3c2443/s3c2443.c b/arch/arm/mach-s3c2443/s3c2443.c
index 33d18dd..e6a28ba 100644
--- a/arch/arm/mach-s3c2443/s3c2443.c
+++ b/arch/arm/mach-s3c2443/s3c2443.c
@@ -16,6 +16,7 @@
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/init.h>
+#include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
 #include <linux/sysdev.h>
@@ -32,6 +33,9 @@
 #include <mach/regs-s3c2443-clock.h>
 #include <mach/reset.h>
 
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
 #include <plat/s3c2443.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
@@ -86,6 +90,9 @@
 
 void __init s3c2443_map_io(void)
 {
+	s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_s3c2443;
+	s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_s3c2443;
+
 	iotable_init(s3c2443_iodesc, ARRAY_SIZE(s3c2443_iodesc));
 }
 
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 1c98d2ff..dd37820 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -127,7 +127,7 @@
 	return s3c64xx_gate(S3C_SCLK_GATE, clk, enable);
 }
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "nand",
 		.id		= -1,
@@ -834,10 +834,6 @@
 void __init s3c64xx_register_clocks(unsigned long xtal, 
 				    unsigned armclk_divlimit)
 {
-	struct clk *clkp;
-	int ret;
-	int ptr;
-
 	armclk_mask = armclk_divlimit;
 
 	s3c24xx_register_baseclocks(xtal);
@@ -845,17 +841,8 @@
 
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1));
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 372ea68..135db1b 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -212,6 +212,7 @@
 
 	config = readl(chan->regs + PL080S_CH_CONFIG);
 	config |= PL080_CONFIG_ENABLE;
+	config &= ~PL080_CONFIG_HALT;
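+	/* clear HALT as well, so the channel services DMA requests as soon
+	 * as it is enabled rather than staying held off */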
 
 	pr_debug("%s: writing config %08x\n", __func__, config);
 	writel(config, chan->regs + PL080S_CH_CONFIG);
diff --git a/arch/arm/mach-s3c64xx/irq-eint.c b/arch/arm/mach-s3c64xx/irq-eint.c
index 5682d6a..2ead818 100644
--- a/arch/arm/mach-s3c64xx/irq-eint.c
+++ b/arch/arm/mach-s3c64xx/irq-eint.c
@@ -30,41 +30,41 @@
 #include <plat/pm.h>
 
 #define eint_offset(irq)	((irq) - IRQ_EINT(0))
-#define eint_irq_to_bit(irq)	(1 << eint_offset(irq))
+#define eint_irq_to_bit(irq)	((u32)(1 << eint_offset(irq)))
 
-static inline void s3c_irq_eint_mask(unsigned int irq)
+static inline void s3c_irq_eint_mask(struct irq_data *data)
 {
 	u32 mask;
 
 	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask |= eint_irq_to_bit(irq);
+	mask |= (u32)data->chip_data;
 	__raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
-static void s3c_irq_eint_unmask(unsigned int irq)
+static void s3c_irq_eint_unmask(struct irq_data *data)
 {
 	u32 mask;
 
 	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask &= ~eint_irq_to_bit(irq);
+	mask &= ~((u32)data->chip_data);
 	__raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
-static inline void s3c_irq_eint_ack(unsigned int irq)
+static inline void s3c_irq_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S3C64XX_EINT0PEND);
+	__raw_writel((u32)data->chip_data, S3C64XX_EINT0PEND);
 }
 
-static void s3c_irq_eint_maskack(unsigned int irq)
+static void s3c_irq_eint_maskack(struct irq_data *data)
 {
 	/* compiler should in-line these */
-	s3c_irq_eint_mask(irq);
-	s3c_irq_eint_ack(irq);
+	s3c_irq_eint_mask(data);
+	s3c_irq_eint_ack(data);
 }
 
-static int s3c_irq_eint_set_type(unsigned int irq, unsigned int type)
+static int s3c_irq_eint_set_type(struct irq_data *data, unsigned int type)
 {
-	int offs = eint_offset(irq);
+	int offs = eint_offset(data->irq);
 	int pin, pin_val;
 	int shift;
 	u32 ctrl, mask;
@@ -140,12 +140,12 @@
 
 static struct irq_chip s3c_irq_eint = {
 	.name		= "s3c-eint",
-	.mask		= s3c_irq_eint_mask,
-	.unmask		= s3c_irq_eint_unmask,
-	.mask_ack	= s3c_irq_eint_maskack,
-	.ack		= s3c_irq_eint_ack,
-	.set_type	= s3c_irq_eint_set_type,
-	.set_wake	= s3c_irqext_wake,
+	.irq_mask	= s3c_irq_eint_mask,
+	.irq_unmask	= s3c_irq_eint_unmask,
+	.irq_mask_ack	= s3c_irq_eint_maskack,
+	.irq_ack	= s3c_irq_eint_ack,
+	.irq_set_type	= s3c_irq_eint_set_type,
+	.irq_set_wake	= s3c_irqext_wake,
 };
 
 /* s3c_irq_demux_eint
@@ -198,6 +198,7 @@
 
 	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
 		set_irq_chip(irq, &s3c_irq_eint);
+		set_irq_chip_data(irq, (void *)eint_irq_to_bit(irq));
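+		/* cache the EINT pending/mask bit in chip_data so the
+		 * mask/unmask/ack callbacks need not recompute it */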
 		set_irq_handler(irq, handle_level_irq);
 		set_irq_flags(irq, IRQF_VALID);
 	}
diff --git a/arch/arm/mach-s5p6442/clock.c b/arch/arm/mach-s5p6442/clock.c
index 16d6e7e..fbbc7be 100644
--- a/arch/arm/mach-s5p6442/clock.c
+++ b/arch/arm/mach-s5p6442/clock.c
@@ -340,7 +340,7 @@
 	clk_pclkd1.rate = pclkd1;
 }
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "pdma",
 		.id		= -1,
@@ -408,23 +408,13 @@
 
 void __init s5p6442_register_clocks(void)
 {
-	struct clk *clkptr;
-	int i, ret;
-
 	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
 
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkptr = init_clocks_disable;
-	for (i = 0; i < ARRAY_SIZE(init_clocks_disable); i++, clkptr++) {
-		ret = s3c24xx_register_clock(clkptr);
-		if (ret < 0) {
-			printk(KERN_ERR "Fail to register clock %s (%d)\n",
-					clkptr->name, ret);
-		} else
-			(clkptr->enable)(clkptr, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
index 31fb2e68..203dd5a 100644
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/arch/arm/mach-s5p6442/include/mach/map.h
@@ -28,6 +28,9 @@
 #define S5P6442_PA_VIC1		(0xE4100000)
 #define S5P6442_PA_VIC2		(0xE4200000)
 
+#define S5P6442_PA_SROMC	(0xE7000000)
+#define S5P_PA_SROMC		S5P6442_PA_SROMC
+
 #define S5P6442_PA_MDMA		0xE8000000
 #define S5P6442_PA_PDMA		0xE9000000
 
diff --git a/arch/arm/mach-s5p6442/mach-smdk6442.c b/arch/arm/mach-s5p6442/mach-smdk6442.c
index 819fd80..e69f137 100644
--- a/arch/arm/mach-s5p6442/mach-smdk6442.c
+++ b/arch/arm/mach-s5p6442/mach-smdk6442.c
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/serial_core.h>
+#include <linux/i2c.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -25,6 +26,7 @@
 #include <plat/s5p6442.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
+#include <plat/iic.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDK6442_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -65,10 +67,15 @@
 };
 
 static struct platform_device *smdk6442_devices[] __initdata = {
+	&s3c_device_i2c0,
 	&s5p6442_device_iis0,
 	&s3c_device_wdt,
 };
 
+static struct i2c_board_info smdk6442_i2c_devs0[] __initdata = {
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
+};
+
 static void __init smdk6442_map_io(void)
 {
 	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -78,6 +85,9 @@
 
 static void __init smdk6442_machine_init(void)
 {
+	s3c_i2c0_set_platdata(NULL);
+	i2c_register_board_info(0, smdk6442_i2c_devs0,
+			ARRAY_SIZE(smdk6442_i2c_devs0));
 	platform_add_devices(smdk6442_devices, ARRAY_SIZE(smdk6442_devices));
 }
 
diff --git a/arch/arm/mach-s5p6442/setup-i2c0.c b/arch/arm/mach-s5p6442/setup-i2c0.c
index 662695d..aad8565 100644
--- a/arch/arm/mach-s5p6442/setup-i2c0.c
+++ b/arch/arm/mach-s5p6442/setup-i2c0.c
@@ -14,12 +14,15 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/gpio.h>
 
 struct platform_device; /* don't need the contents */
 
+#include <plat/gpio-cfg.h>
 #include <plat/iic.h>
 
 void s3c_i2c0_cfg_gpio(struct platform_device *dev)
 {
-	/* Will be populated later */
+	s3c_gpio_cfgall_range(S5P6442_GPD1(0), 2,
+			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
 }
diff --git a/arch/arm/mach-s5p64x0/Makefile b/arch/arm/mach-s5p64x0/Makefile
index 2655829..ae6bf6f 100644
--- a/arch/arm/mach-s5p64x0/Makefile
+++ b/arch/arm/mach-s5p64x0/Makefile
@@ -12,9 +12,9 @@
 
 # Core support for S5P64X0 system
 
-obj-$(CONFIG_ARCH_S5P64X0)	+= cpu.o init.o clock.o dma.o
+obj-$(CONFIG_ARCH_S5P64X0)	+= cpu.o init.o clock.o dma.o gpiolib.o
 obj-$(CONFIG_ARCH_S5P64X0)	+= setup-i2c0.o
-obj-$(CONFIG_CPU_S5P6440)	+= clock-s5p6440.o gpio.o
+obj-$(CONFIG_CPU_S5P6440)	+= clock-s5p6440.o
 obj-$(CONFIG_CPU_S5P6450)	+= clock-s5p6450.o
 
 # machine support
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index 409c5fc..9f12c2e 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -133,7 +133,7 @@
  * recommended to keep the following clocks disabled until the driver requests
  * for enabling the clock.
  */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "nand",
 		.id		= -1,
@@ -419,7 +419,7 @@
 static struct clksrc_clk clksrcs[] = {
 	{
 		.clk	= {
-			.name		= "mmc_bus",
+			.name		= "sclk_mmc",
 			.id		= 0,
 			.ctrlbit	= (1 << 24),
 			.enable		= s5p64x0_sclk_ctrl,
@@ -429,7 +429,7 @@
 		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 0, .size = 4 },
 	}, {
 		.clk	= {
-			.name		= "mmc_bus",
+			.name		= "sclk_mmc",
 			.id		= 1,
 			.ctrlbit	= (1 << 25),
 			.enable		= s5p64x0_sclk_ctrl,
@@ -439,7 +439,7 @@
 		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 4, .size = 4 },
 	}, {
 		.clk	= {
-			.name		= "mmc_bus",
+			.name		= "sclk_mmc",
 			.id		= 2,
 			.ctrlbit	= (1 << 26),
 			.enable		= s5p64x0_sclk_ctrl,
@@ -602,8 +602,6 @@
 
 void __init s5p6440_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
 	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
@@ -614,16 +612,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 7fc6abd..4eec457 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -181,7 +181,7 @@
  * recommended to keep the following clocks disabled until the driver requests
  * for enabling the clock.
  */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "usbhost",
 		.id		= -1,
@@ -231,6 +231,12 @@
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 5),
 	}, {
+		.name		= "rtc",
+		.id		= -1,
+		.parent		= &clk_pclk_low.clk,
+		.enable		= s5p64x0_pclk_ctrl,
+		.ctrlbit	= (1 << 6),
+	}, {
 		.name		= "adc",
 		.id		= -1,
 		.parent		= &clk_pclk_low.clk,
@@ -261,6 +267,18 @@
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 26),
 	}, {
+		.name		= "iis",
+		.id		= 1,
+		.parent		= &clk_pclk_low.clk,
+		.enable		= s5p64x0_pclk_ctrl,
+		.ctrlbit	= (1 << 15),
+	}, {
+		.name		= "iis",
+		.id		= 2,
+		.parent		= &clk_pclk_low.clk,
+		.enable		= s5p64x0_pclk_ctrl,
+		.ctrlbit	= (1 << 16),
+	}, {
 		.name		= "i2c",
 		.id		= 1,
 		.parent		= &clk_pclk_low.clk,
@@ -633,8 +651,6 @@
 
 void __init s5p6450_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
 	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
@@ -643,16 +659,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p64x0/dev-audio.c b/arch/arm/mach-s5p64x0/dev-audio.c
index 14f89e73..35f1f22 100644
--- a/arch/arm/mach-s5p64x0/dev-audio.c
+++ b/arch/arm/mach-s5p64x0/dev-audio.c
@@ -24,13 +24,13 @@
 	[1] = "sclk_audio2",
 };
 
-static int s5p64x0_cfg_i2s(struct platform_device *pdev)
+static int s5p6440_cfg_i2s(struct platform_device *pdev)
 {
-	/* configure GPIO for i2s port */
 	switch (pdev->id) {
 	case 0:
-		s3c_gpio_cfgpin_range(S5P6440_GPR(4), 5, S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S5P6440_GPC(4), 2, S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S5P6440_GPC(7), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S5P6440_GPH(6), 4, S3C_GPIO_SFN(5));
 		break;
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
@@ -40,8 +40,8 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s5p64x0_i2s_pdata = {
-	.cfg_gpio = s5p64x0_cfg_i2s,
+static struct s3c_audio_pdata s5p6440_i2s_pdata = {
+	.cfg_gpio = s5p6440_cfg_i2s,
 	.type = {
 		.i2s = {
 			.quirks = QUIRK_PRI_6CHAN,
@@ -50,7 +50,7 @@
 	},
 };
 
-static struct resource s5p64x0_iis0_resource[] = {
+static struct resource s5p64x0_i2s0_resource[] = {
 	[0] = {
 		.start	= S5P64X0_PA_I2S,
 		.end	= S5P64X0_PA_I2S + 0x100 - 1,
@@ -71,20 +71,117 @@
 struct platform_device s5p6440_device_iis = {
 	.name		= "samsung-i2s",
 	.id		= 0,
-	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
-	.resource	= s5p64x0_iis0_resource,
+	.num_resources	= ARRAY_SIZE(s5p64x0_i2s0_resource),
+	.resource	= s5p64x0_i2s0_resource,
 	.dev = {
-		.platform_data = &s5p64x0_i2s_pdata,
+		.platform_data = &s5p6440_i2s_pdata,
+	},
+};
+
+static int s5p6450_cfg_i2s(struct platform_device *pdev)
+{
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5P6450_GPR(4), 5, S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S5P6450_GPR(13), 2, S3C_GPIO_SFN(5));
+		break;
+	case 1:
+		s3c_gpio_cfgpin(S5P6440_GPB(4), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S5P6450_GPC(0), 4, S3C_GPIO_SFN(5));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5P6450_GPK(0), 5, S3C_GPIO_SFN(5));
+		break;
+	default:
+		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata s5p6450_i2s0_pdata = {
+	.cfg_gpio = s5p6450_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
 	},
 };
 
 struct platform_device s5p6450_device_iis0 = {
 	.name		= "samsung-i2s",
 	.id		= 0,
-	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
-	.resource	= s5p64x0_iis0_resource,
+	.num_resources	= ARRAY_SIZE(s5p64x0_i2s0_resource),
+	.resource	= s5p64x0_i2s0_resource,
 	.dev = {
-		.platform_data = &s5p64x0_i2s_pdata,
+		.platform_data = &s5p6450_i2s0_pdata,
+	},
+};
+
+static struct s3c_audio_pdata s5p6450_i2s_pdata = {
+	.cfg_gpio = s5p6450_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc,
+		},
+	},
+};
+
+static struct resource s5p6450_i2s1_resource[] = {
+	[0] = {
+		.start	= S5P6450_PA_I2S1,
+		.end	= S5P6450_PA_I2S1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S1_TX,
+		.end	= DMACH_I2S1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S1_RX,
+		.end	= DMACH_I2S1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5p6450_device_iis1 = {
+	.name		= "samsung-i2s",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(s5p6450_i2s1_resource),
+	.resource	= s5p6450_i2s1_resource,
+	.dev = {
+		.platform_data = &s5p6450_i2s_pdata,
+	},
+};
+
+static struct resource s5p6450_i2s2_resource[] = {
+	[0] = {
+		.start	= S5P6450_PA_I2S2,
+		.end	= S5P6450_PA_I2S2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S2_TX,
+		.end	= DMACH_I2S2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S2_RX,
+		.end	= DMACH_I2S2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5p6450_device_iis2 = {
+	.name		= "samsung-i2s",
+	.id		= 2,
+	.num_resources	= ARRAY_SIZE(s5p6450_i2s2_resource),
+	.resource	= s5p6450_i2s2_resource,
+	.dev = {
+		.platform_data = &s5p6450_i2s_pdata,
 	},
 };
 
diff --git a/arch/arm/mach-s5p64x0/gpio.c b/arch/arm/mach-s5p64x0/gpio.c
deleted file mode 100644
index 39159dd..0000000
--- a/arch/arm/mach-s5p64x0/gpio.c
+++ /dev/null
@@ -1,342 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/gpio.c
- *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5P64X0 - GPIOlib support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <mach/map.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg.h>
-#include <plat/gpio-cfg-helpers.h>
-
-/* To be implemented S5P6450 GPIO */
-
-/*
- * S5P6440 GPIO bank summary:
- *
- * Bank	GPIOs	Style	SlpCon	ExtInt Group
- * A	6	4Bit	Yes	1
- * B	7	4Bit	Yes	1
- * C	8	4Bit	Yes	2
- * F	2	2Bit	Yes	4 [1]
- * G	7	4Bit	Yes	5
- * H	10	4Bit[2]	Yes	6
- * I	16	2Bit	Yes	None
- * J	12	2Bit	Yes	None
- * N	16	2Bit	No	IRQ_EINT
- * P	8	2Bit	Yes	8
- * R	15	4Bit[2]	Yes	8
- *
- * [1] BANKF pins 14,15 do not form part of the external interrupt sources
- * [2] BANK has two control registers, GPxCON0 and GPxCON1
- */
-
-static int s5p64x0_gpiolib_rbank_4bit2_input(struct gpio_chip *chip,
-					     unsigned int offset)
-{
-	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
-	void __iomem *base = ourchip->base;
-	void __iomem *regcon = base;
-	unsigned long con;
-	unsigned long flags;
-
-	switch (offset) {
-	case 6:
-		offset += 1;
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-	case 5:
-		regcon -= 4;
-		break;
-	default:
-		offset -= 7;
-		break;
-	}
-
-	s3c_gpio_lock(ourchip, flags);
-
-	con = __raw_readl(regcon);
-	con &= ~(0xf << con_4bit_shift(offset));
-	__raw_writel(con, regcon);
-
-	s3c_gpio_unlock(ourchip, flags);
-
-	return 0;
-}
-
-static int s5p64x0_gpiolib_rbank_4bit2_output(struct gpio_chip *chip,
-					      unsigned int offset, int value)
-{
-	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
-	void __iomem *base = ourchip->base;
-	void __iomem *regcon = base;
-	unsigned long con;
-	unsigned long dat;
-	unsigned long flags;
-	unsigned con_offset  = offset;
-
-	switch (con_offset) {
-	case 6:
-		con_offset += 1;
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-	case 5:
-		regcon -= 4;
-		break;
-	default:
-		con_offset -= 7;
-		break;
-	}
-
-	s3c_gpio_lock(ourchip, flags);
-
-	con = __raw_readl(regcon);
-	con &= ~(0xf << con_4bit_shift(con_offset));
-	con |= 0x1 << con_4bit_shift(con_offset);
-
-	dat = __raw_readl(base + GPIODAT_OFF);
-	if (value)
-		dat |= 1 << offset;
-	else
-		dat &= ~(1 << offset);
-
-	__raw_writel(con, regcon);
-	__raw_writel(dat, base + GPIODAT_OFF);
-
-	s3c_gpio_unlock(ourchip, flags);
-
-	return 0;
-}
-
-int s5p64x0_gpio_setcfg_4bit_rbank(struct s3c_gpio_chip *chip,
-				   unsigned int off, unsigned int cfg)
-{
-	void __iomem *reg = chip->base;
-	unsigned int shift;
-	u32 con;
-
-	switch (off) {
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-	case 5:
-		shift = (off & 7) * 4;
-		reg -= 4;
-		break;
-	case 6:
-		shift = ((off + 1) & 7) * 4;
-		reg -= 4;
-	default:
-		shift = ((off + 1) & 7) * 4;
-		break;
-	}
-
-	if (s3c_gpio_is_cfg_special(cfg)) {
-		cfg &= 0xf;
-		cfg <<= shift;
-	}
-
-	con = __raw_readl(reg);
-	con &= ~(0xf << shift);
-	con |= cfg;
-	__raw_writel(con, reg);
-
-	return 0;
-}
-
-static struct s3c_gpio_cfg s5p64x0_gpio_cfgs[] = {
-	{
-		.cfg_eint	= 0,
-	}, {
-		.cfg_eint	= 7,
-	}, {
-		.cfg_eint	= 3,
-		.set_config	= s5p64x0_gpio_setcfg_4bit_rbank,
-	}, {
-		.cfg_eint	= 0,
-		.set_config	= s3c_gpio_setcfg_s3c24xx,
-		.get_config	= s3c_gpio_getcfg_s3c24xx,
-	}, {
-		.cfg_eint	= 2,
-		.set_config	= s3c_gpio_setcfg_s3c24xx,
-		.get_config	= s3c_gpio_getcfg_s3c24xx,
-	}, {
-		.cfg_eint	= 3,
-		.set_config	= s3c_gpio_setcfg_s3c24xx,
-		.get_config	= s3c_gpio_getcfg_s3c24xx,
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_4bit[] = {
-	{
-		.base	= S5P6440_GPA_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPA(0),
-			.ngpio	= S5P6440_GPIO_A_NR,
-			.label	= "GPA",
-		},
-	}, {
-		.base	= S5P6440_GPB_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPB(0),
-			.ngpio	= S5P6440_GPIO_B_NR,
-			.label	= "GPB",
-		},
-	}, {
-		.base	= S5P6440_GPC_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPC(0),
-			.ngpio	= S5P6440_GPIO_C_NR,
-			.label	= "GPC",
-		},
-	}, {
-		.base	= S5P6440_GPG_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPG(0),
-			.ngpio	= S5P6440_GPIO_G_NR,
-			.label	= "GPG",
-		},
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_4bit2[] = {
-	{
-		.base	= S5P6440_GPH_BASE + 0x4,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPH(0),
-			.ngpio	= S5P6440_GPIO_H_NR,
-			.label	= "GPH",
-		},
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_rbank_4bit2[] = {
-	{
-		.base	= S5P6440_GPR_BASE + 0x4,
-		.config	= &s5p64x0_gpio_cfgs[2],
-		.chip	= {
-			.base	= S5P6440_GPR(0),
-			.ngpio	= S5P6440_GPIO_R_NR,
-			.label	= "GPR",
-		},
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_2bit[] = {
-	{
-		.base	= S5P6440_GPF_BASE,
-		.config	= &s5p64x0_gpio_cfgs[5],
-		.chip	= {
-			.base	= S5P6440_GPF(0),
-			.ngpio	= S5P6440_GPIO_F_NR,
-			.label	= "GPF",
-		},
-	}, {
-		.base	= S5P6440_GPI_BASE,
-		.config	= &s5p64x0_gpio_cfgs[3],
-		.chip	= {
-			.base	= S5P6440_GPI(0),
-			.ngpio	= S5P6440_GPIO_I_NR,
-			.label	= "GPI",
-		},
-	}, {
-		.base	= S5P6440_GPJ_BASE,
-		.config	= &s5p64x0_gpio_cfgs[3],
-		.chip	= {
-			.base	= S5P6440_GPJ(0),
-			.ngpio	= S5P6440_GPIO_J_NR,
-			.label	= "GPJ",
-		},
-	}, {
-		.base	= S5P6440_GPN_BASE,
-		.config	= &s5p64x0_gpio_cfgs[4],
-		.chip	= {
-			.base	= S5P6440_GPN(0),
-			.ngpio	= S5P6440_GPIO_N_NR,
-			.label	= "GPN",
-		},
-	}, {
-		.base	= S5P6440_GPP_BASE,
-		.config	= &s5p64x0_gpio_cfgs[5],
-		.chip	= {
-			.base	= S5P6440_GPP(0),
-			.ngpio	= S5P6440_GPIO_P_NR,
-			.label	= "GPP",
-		},
-	},
-};
-
-void __init s5p64x0_gpiolib_set_cfg(struct s3c_gpio_cfg *chipcfg, int nr_chips)
-{
-	for (; nr_chips > 0; nr_chips--, chipcfg++) {
-		if (!chipcfg->set_config)
-			chipcfg->set_config	= s3c_gpio_setcfg_s3c64xx_4bit;
-		if (!chipcfg->get_config)
-			chipcfg->get_config	= s3c_gpio_getcfg_s3c64xx_4bit;
-		if (!chipcfg->set_pull)
-			chipcfg->set_pull	= s3c_gpio_setpull_updown;
-		if (!chipcfg->get_pull)
-			chipcfg->get_pull	= s3c_gpio_getpull_updown;
-	}
-}
-
-static void __init s5p64x0_gpio_add_rbank_4bit2(struct s3c_gpio_chip *chip,
-						int nr_chips)
-{
-	for (; nr_chips > 0; nr_chips--, chip++) {
-		chip->chip.direction_input = s5p64x0_gpiolib_rbank_4bit2_input;
-		chip->chip.direction_output =
-					s5p64x0_gpiolib_rbank_4bit2_output;
-		s3c_gpiolib_add(chip);
-	}
-}
-
-static int __init s5p6440_gpiolib_init(void)
-{
-	struct s3c_gpio_chip *chips = s5p6440_gpio_2bit;
-	int nr_chips = ARRAY_SIZE(s5p6440_gpio_2bit);
-
-	s5p64x0_gpiolib_set_cfg(s5p64x0_gpio_cfgs,
-				ARRAY_SIZE(s5p64x0_gpio_cfgs));
-
-	for (; nr_chips > 0; nr_chips--, chips++)
-		s3c_gpiolib_add(chips);
-
-	samsung_gpiolib_add_4bit_chips(s5p6440_gpio_4bit,
-				ARRAY_SIZE(s5p6440_gpio_4bit));
-
-	samsung_gpiolib_add_4bit2_chips(s5p6440_gpio_4bit2,
-				ARRAY_SIZE(s5p6440_gpio_4bit2));
-
-	s5p64x0_gpio_add_rbank_4bit2(s5p6440_gpio_rbank_4bit2,
-				ARRAY_SIZE(s5p6440_gpio_rbank_4bit2));
-
-	return 0;
-}
-arch_initcall(s5p6440_gpiolib_init);
diff --git a/arch/arm/mach-s5p64x0/gpiolib.c b/arch/arm/mach-s5p64x0/gpiolib.c
new file mode 100644
index 0000000..e7fb3b0
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/gpiolib.c
@@ -0,0 +1,511 @@
+/* linux/arch/arm/mach-s5p64x0/gpiolib.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5P64X0 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-clock.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+/*
+ * S5P6440 GPIO bank summary:
+ *
+ * Bank	GPIOs	Style	SlpCon	ExtInt Group
+ * A	6	4Bit	Yes	1
+ * B	7	4Bit	Yes	1
+ * C	8	4Bit	Yes	2
+ * F	2	2Bit	Yes	4 [1]
+ * G	7	4Bit	Yes	5
+ * H	10	4Bit[2]	Yes	6
+ * I	16	2Bit	Yes	None
+ * J	12	2Bit	Yes	None
+ * N	16	2Bit	No	IRQ_EINT
+ * P	8	2Bit	Yes	8
+ * R	15	4Bit[2]	Yes	8
+ *
+ * S5P6450 GPIO bank summary:
+ *
+ * Bank	GPIOs	Style	SlpCon	ExtInt Group
+ * A	6	4Bit	Yes	1
+ * B	7	4Bit	Yes	1
+ * C	8	4Bit	Yes	2
+ * D	8	4Bit	Yes	None
+ * F	2	2Bit	Yes	None
+ * G	14	4Bit[2]	Yes	5
+ * H	10	4Bit[2]	Yes	6
+ * I	16	2Bit	Yes	None
+ * J	12	2Bit	Yes	None
+ * K	5	4Bit	Yes	None
+ * N	16	2Bit	No	IRQ_EINT
+ * P	11	2Bit	Yes	8
+ * Q	14	2Bit	Yes	None
+ * R	15	4Bit[2]	Yes	None
+ * S	8	2Bit	Yes	None
+ *
+ * [1] BANKF pins 14,15 do not form part of the external interrupt sources
+ * [2] BANK has two control registers, GPxCON0 and GPxCON1
+ */
+
+static int s5p64x0_gpiolib_rbank_4bit2_input(struct gpio_chip *chip,
+					     unsigned int offset)
+{
+	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+	void __iomem *base = ourchip->base;
+	void __iomem *regcon = base;
+	unsigned long con;
+	unsigned long flags;
+
+	switch (offset) {
+	case 6:
+		offset += 1;
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		regcon -= 4;
+		break;
+	default:
+		offset -= 7;
+		break;
+	}
+
+	s3c_gpio_lock(ourchip, flags);
+
+	con = __raw_readl(regcon);
+	con &= ~(0xf << con_4bit_shift(offset));
+	__raw_writel(con, regcon);
+
+	s3c_gpio_unlock(ourchip, flags);
+
+	return 0;
+}
+
+static int s5p64x0_gpiolib_rbank_4bit2_output(struct gpio_chip *chip,
+					      unsigned int offset, int value)
+{
+	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+	void __iomem *base = ourchip->base;
+	void __iomem *regcon = base;
+	unsigned long con;
+	unsigned long dat;
+	unsigned long flags;
+	unsigned con_offset  = offset;
+
+	switch (con_offset) {
+	case 6:
+		con_offset += 1;
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		regcon -= 4;
+		break;
+	default:
+		con_offset -= 7;
+		break;
+	}
+
+	s3c_gpio_lock(ourchip, flags);
+
+	con = __raw_readl(regcon);
+	con &= ~(0xf << con_4bit_shift(con_offset));
+	con |= 0x1 << con_4bit_shift(con_offset);
+
+	dat = __raw_readl(base + GPIODAT_OFF);
+	if (value)
+		dat |= 1 << offset;
+	else
+		dat &= ~(1 << offset);
+
+	__raw_writel(con, regcon);
+	__raw_writel(dat, base + GPIODAT_OFF);
+
+	s3c_gpio_unlock(ourchip, flags);
+
+	return 0;
+}
+
+int s5p64x0_gpio_setcfg_4bit_rbank(struct s3c_gpio_chip *chip,
+				   unsigned int off, unsigned int cfg)
+{
+	void __iomem *reg = chip->base;
+	unsigned int shift;
+	u32 con;
+
+	switch (off) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		shift = (off & 7) * 4;
+		reg -= 4;
+		break;
+	case 6:
+		shift = ((off + 1) & 7) * 4;
+		reg -= 4;
+		break;
+	default:
+		shift = ((off + 1) & 7) * 4;
+		break;
+	}
+
+	if (s3c_gpio_is_cfg_special(cfg)) {
+		cfg &= 0xf;
+		cfg <<= shift;
+	}
+
+	con = __raw_readl(reg);
+	con &= ~(0xf << shift);
+	con |= cfg;
+	__raw_writel(con, reg);
+
+	return 0;
+}
+
+static struct s3c_gpio_cfg s5p64x0_gpio_cfgs[] = {
+	{
+		.cfg_eint	= 0,
+	}, {
+		.cfg_eint	= 7,
+	}, {
+		.cfg_eint	= 3,
+		.set_config	= s5p64x0_gpio_setcfg_4bit_rbank,
+	}, {
+		.cfg_eint	= 0,
+		.set_config	= s3c_gpio_setcfg_s3c24xx,
+		.get_config	= s3c_gpio_getcfg_s3c24xx,
+	}, {
+		.cfg_eint	= 2,
+		.set_config	= s3c_gpio_setcfg_s3c24xx,
+		.get_config	= s3c_gpio_getcfg_s3c24xx,
+	}, {
+		.cfg_eint	= 3,
+		.set_config	= s3c_gpio_setcfg_s3c24xx,
+		.get_config	= s3c_gpio_getcfg_s3c24xx,
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_4bit[] = {
+	{
+		.base	= S5P64X0_GPA_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPA(0),
+			.ngpio	= S5P6440_GPIO_A_NR,
+			.label	= "GPA",
+		},
+	}, {
+		.base	= S5P64X0_GPB_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPB(0),
+			.ngpio	= S5P6440_GPIO_B_NR,
+			.label	= "GPB",
+		},
+	}, {
+		.base	= S5P64X0_GPC_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPC(0),
+			.ngpio	= S5P6440_GPIO_C_NR,
+			.label	= "GPC",
+		},
+	}, {
+		.base	= S5P64X0_GPG_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPG(0),
+			.ngpio	= S5P6440_GPIO_G_NR,
+			.label	= "GPG",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_4bit2[] = {
+	{
+		.base	= S5P64X0_GPH_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPH(0),
+			.ngpio	= S5P6440_GPIO_H_NR,
+			.label	= "GPH",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_rbank_4bit2[] = {
+	{
+		.base	= S5P64X0_GPR_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[2],
+		.chip	= {
+			.base	= S5P6440_GPR(0),
+			.ngpio	= S5P6440_GPIO_R_NR,
+			.label	= "GPR",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_2bit[] = {
+	{
+		.base	= S5P64X0_GPF_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6440_GPF(0),
+			.ngpio	= S5P6440_GPIO_F_NR,
+			.label	= "GPF",
+		},
+	}, {
+		.base	= S5P64X0_GPI_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6440_GPI(0),
+			.ngpio	= S5P6440_GPIO_I_NR,
+			.label	= "GPI",
+		},
+	}, {
+		.base	= S5P64X0_GPJ_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6440_GPJ(0),
+			.ngpio	= S5P6440_GPIO_J_NR,
+			.label	= "GPJ",
+		},
+	}, {
+		.base	= S5P64X0_GPN_BASE,
+		.config	= &s5p64x0_gpio_cfgs[4],
+		.chip	= {
+			.base	= S5P6440_GPN(0),
+			.ngpio	= S5P6440_GPIO_N_NR,
+			.label	= "GPN",
+		},
+	}, {
+		.base	= S5P64X0_GPP_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6440_GPP(0),
+			.ngpio	= S5P6440_GPIO_P_NR,
+			.label	= "GPP",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_4bit[] = {
+	{
+		.base	= S5P64X0_GPA_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPA(0),
+			.ngpio	= S5P6450_GPIO_A_NR,
+			.label	= "GPA",
+		},
+	}, {
+		.base	= S5P64X0_GPB_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPB(0),
+			.ngpio	= S5P6450_GPIO_B_NR,
+			.label	= "GPB",
+		},
+	}, {
+		.base	= S5P64X0_GPC_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPC(0),
+			.ngpio	= S5P6450_GPIO_C_NR,
+			.label	= "GPC",
+		},
+	}, {
+		.base	= S5P6450_GPD_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPD(0),
+			.ngpio	= S5P6450_GPIO_D_NR,
+			.label	= "GPD",
+		},
+	}, {
+		.base	= S5P6450_GPK_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPK(0),
+			.ngpio	= S5P6450_GPIO_K_NR,
+			.label	= "GPK",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_4bit2[] = {
+	{
+		.base	= S5P64X0_GPG_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPG(0),
+			.ngpio	= S5P6450_GPIO_G_NR,
+			.label	= "GPG",
+		},
+	}, {
+		.base	= S5P64X0_GPH_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPH(0),
+			.ngpio	= S5P6450_GPIO_H_NR,
+			.label	= "GPH",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_rbank_4bit2[] = {
+	{
+		.base	= S5P64X0_GPR_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[2],
+		.chip	= {
+			.base	= S5P6450_GPR(0),
+			.ngpio	= S5P6450_GPIO_R_NR,
+			.label	= "GPR",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_2bit[] = {
+	{
+		.base	= S5P64X0_GPF_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6450_GPF(0),
+			.ngpio	= S5P6450_GPIO_F_NR,
+			.label	= "GPF",
+		},
+	}, {
+		.base	= S5P64X0_GPI_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6450_GPI(0),
+			.ngpio	= S5P6450_GPIO_I_NR,
+			.label	= "GPI",
+		},
+	}, {
+		.base	= S5P64X0_GPJ_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6450_GPJ(0),
+			.ngpio	= S5P6450_GPIO_J_NR,
+			.label	= "GPJ",
+		},
+	}, {
+		.base	= S5P64X0_GPN_BASE,
+		.config	= &s5p64x0_gpio_cfgs[4],
+		.chip	= {
+			.base	= S5P6450_GPN(0),
+			.ngpio	= S5P6450_GPIO_N_NR,
+			.label	= "GPN",
+		},
+	}, {
+		.base	= S5P64X0_GPP_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6450_GPP(0),
+			.ngpio	= S5P6450_GPIO_P_NR,
+			.label	= "GPP",
+		},
+	}, {
+		.base	= S5P6450_GPQ_BASE,
+		.config	= &s5p64x0_gpio_cfgs[4],
+		.chip	= {
+			.base	= S5P6450_GPQ(0),
+			.ngpio	= S5P6450_GPIO_Q_NR,
+			.label	= "GPQ",
+		},
+	}, {
+		.base	= S5P6450_GPS_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6450_GPS(0),
+			.ngpio	= S5P6450_GPIO_S_NR,
+			.label	= "GPS",
+		},
+	},
+};
+
+void __init s5p64x0_gpiolib_set_cfg(struct s3c_gpio_cfg *chipcfg, int nr_chips)
+{
+	for (; nr_chips > 0; nr_chips--, chipcfg++) {
+		if (!chipcfg->set_config)
+			chipcfg->set_config	= s3c_gpio_setcfg_s3c64xx_4bit;
+		if (!chipcfg->get_config)
+			chipcfg->get_config	= s3c_gpio_getcfg_s3c64xx_4bit;
+		if (!chipcfg->set_pull)
+			chipcfg->set_pull	= s3c_gpio_setpull_updown;
+		if (!chipcfg->get_pull)
+			chipcfg->get_pull	= s3c_gpio_getpull_updown;
+	}
+}
+
+static void __init s5p64x0_gpio_add_rbank_4bit2(struct s3c_gpio_chip *chip,
+						int nr_chips)
+{
+	for (; nr_chips > 0; nr_chips--, chip++) {
+		chip->chip.direction_input = s5p64x0_gpiolib_rbank_4bit2_input;
+		chip->chip.direction_output =
+					s5p64x0_gpiolib_rbank_4bit2_output;
+		s3c_gpiolib_add(chip);
+	}
+}
+
+static int __init s5p64x0_gpiolib_init(void)
+{
+	unsigned int chipid;
+
+	chipid = __raw_readl(S5P64X0_SYS_ID);
+
+	s5p64x0_gpiolib_set_cfg(s5p64x0_gpio_cfgs,
+				ARRAY_SIZE(s5p64x0_gpio_cfgs));
+
+	if ((chipid & 0xff000) == 0x50000) {
+		samsung_gpiolib_add_2bit_chips(s5p6450_gpio_2bit,
+					ARRAY_SIZE(s5p6450_gpio_2bit));
+
+		samsung_gpiolib_add_4bit_chips(s5p6450_gpio_4bit,
+					ARRAY_SIZE(s5p6450_gpio_4bit));
+
+		samsung_gpiolib_add_4bit2_chips(s5p6450_gpio_4bit2,
+					ARRAY_SIZE(s5p6450_gpio_4bit2));
+
+		s5p64x0_gpio_add_rbank_4bit2(s5p6450_gpio_rbank_4bit2,
+					ARRAY_SIZE(s5p6450_gpio_rbank_4bit2));
+	} else {
+		samsung_gpiolib_add_2bit_chips(s5p6440_gpio_2bit,
+					ARRAY_SIZE(s5p6440_gpio_2bit));
+
+		samsung_gpiolib_add_4bit_chips(s5p6440_gpio_4bit,
+					ARRAY_SIZE(s5p6440_gpio_4bit));
+
+		samsung_gpiolib_add_4bit2_chips(s5p6440_gpio_4bit2,
+					ARRAY_SIZE(s5p6440_gpio_4bit2));
+
+		s5p64x0_gpio_add_rbank_4bit2(s5p6440_gpio_rbank_4bit2,
+					ARRAY_SIZE(s5p6440_gpio_rbank_4bit2));
+	}
+
+	return 0;
+}
+core_initcall(s5p64x0_gpiolib_init);
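
A note on the variant selection above: the init routine reads the SYS_ID
register once and masks the product-code field to decide between the two
bank layouts at run time. A minimal user-space sketch of that check; the
sample ID value is hypothetical (the real code reads S5P64X0_SYS_ID):

#include <stdio.h>

/* Bits [19:12] of the chip ID carry the product code: 0x50 means
 * S5P6450; anything else is treated as an S5P6440 here. */
static const char *s5p64x0_variant(unsigned int chipid)
{
	return ((chipid & 0xff000) == 0x50000) ? "S5P6450" : "S5P6440";
}

int main(void)
{
	unsigned int sample_id = 0x36450101;	/* hypothetical SYS_ID value */

	printf("chip id %#x -> %s\n", sample_id, s5p64x0_variant(sample_id));
	return 0;
}
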
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h
index 31e5341..a9365e5 100644
--- a/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -29,6 +29,9 @@
 #define S5P64X0_PA_VIC0		(0xE4000000)
 #define S5P64X0_PA_VIC1		(0xE4100000)
 
+#define S5P64X0_PA_SROMC	(0xE7000000)
+#define S5P_PA_SROMC		S5P64X0_PA_SROMC
+
 #define S5P64X0_PA_PDMA		(0xE9000000)
 
 #define S5P64X0_PA_TIMER	(0xEA000000)
@@ -63,6 +66,8 @@
 #define S5P64X0_PA_HSMMC(x)	(0xED800000 + ((x) * 0x100000))
 
 #define S5P64X0_PA_I2S		(0xF2000000)
+#define S5P6450_PA_I2S1		0xF2800000
+#define S5P6450_PA_I2S2		0xF2900000
 
 #define S5P64X0_PA_PCM		(0xF2100000)
 
diff --git a/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h b/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h
index 85f448e..0953ef6 100644
--- a/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h
@@ -15,48 +15,23 @@
 
 #include <mach/map.h>
 
-/* Will be implemented S5P6442 GPIOlib */
-
 /* Base addresses for each of the banks */
 
-#define S5P6440_GPA_BASE		(S5P_VA_GPIO + 0x0000)
-#define S5P6440_GPB_BASE		(S5P_VA_GPIO + 0x0020)
-#define S5P6440_GPC_BASE		(S5P_VA_GPIO + 0x0040)
-#define S5P6440_GPF_BASE		(S5P_VA_GPIO + 0x00A0)
-#define S5P6440_GPG_BASE		(S5P_VA_GPIO + 0x00C0)
-#define S5P6440_GPH_BASE		(S5P_VA_GPIO + 0x00E0)
-#define S5P6440_GPI_BASE		(S5P_VA_GPIO + 0x0100)
-#define S5P6440_GPJ_BASE		(S5P_VA_GPIO + 0x0120)
-#define S5P6440_GPN_BASE		(S5P_VA_GPIO + 0x0830)
-#define S5P6440_GPP_BASE		(S5P_VA_GPIO + 0x0160)
-#define S5P6440_GPR_BASE		(S5P_VA_GPIO + 0x0290)
+#define S5P64X0_GPA_BASE		(S5P_VA_GPIO + 0x0000)
+#define S5P64X0_GPB_BASE		(S5P_VA_GPIO + 0x0020)
+#define S5P64X0_GPC_BASE		(S5P_VA_GPIO + 0x0040)
+#define S5P64X0_GPF_BASE		(S5P_VA_GPIO + 0x00A0)
+#define S5P64X0_GPG_BASE		(S5P_VA_GPIO + 0x00C0)
+#define S5P64X0_GPH_BASE		(S5P_VA_GPIO + 0x00E0)
+#define S5P64X0_GPI_BASE		(S5P_VA_GPIO + 0x0100)
+#define S5P64X0_GPJ_BASE		(S5P_VA_GPIO + 0x0120)
+#define S5P64X0_GPN_BASE		(S5P_VA_GPIO + 0x0830)
+#define S5P64X0_GPP_BASE		(S5P_VA_GPIO + 0x0160)
+#define S5P64X0_GPR_BASE		(S5P_VA_GPIO + 0x0290)
 
-#define S5P6440_EINT0CON0		(S5P_VA_GPIO + 0x900)
-#define S5P6440_EINT0FLTCON0		(S5P_VA_GPIO + 0x910)
-#define S5P6440_EINT0FLTCON1		(S5P_VA_GPIO + 0x914)
-#define S5P6440_EINT0MASK		(S5P_VA_GPIO + 0x920)
-#define S5P6440_EINT0PEND		(S5P_VA_GPIO + 0x924)
-
-/* for LCD */
-
-#define S5P6440_SPCON_LCD_SEL_RGB	(1 << 0)
-#define S5P6440_SPCON_LCD_SEL_MASK	(3 << 0)
-
-/*
- * These set of macros are not really useful for the
- * GPF/GPI/GPJ/GPN/GPP, useful for others set of GPIO's (4 bit)
- */
-
-#define S5P6440_GPIO_CONMASK(__gpio)	(0xf << ((__gpio) * 4))
-#define S5P6440_GPIO_INPUT(__gpio)	(0x0 << ((__gpio) * 4))
-#define S5P6440_GPIO_OUTPUT(__gpio)	(0x1 << ((__gpio) * 4))
-
-/*
- * Use these macros for GPF/GPI/GPJ/GPN/GPP set of GPIO (2 bit)
- */
-
-#define S5P6440_GPIO2_CONMASK(__gpio)	(0x3 << ((__gpio) * 2))
-#define S5P6440_GPIO2_INPUT(__gpio)	(0x0 << ((__gpio) * 2))
-#define S5P6440_GPIO2_OUTPUT(__gpio)	(0x1 << ((__gpio) * 2))
+#define S5P6450_GPD_BASE		(S5P_VA_GPIO + 0x0060)
+#define S5P6450_GPK_BASE		(S5P_VA_GPIO + 0x0140)
+#define S5P6450_GPQ_BASE		(S5P_VA_GPIO + 0x0180)
+#define S5P6450_GPS_BASE		(S5P_VA_GPIO + 0x0300)
 
 #endif /* __ASM_ARCH_REGS_GPIO_H */
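
The deleted S5P6440_GPIO_CONMASK()/INPUT()/OUTPUT() macro families above
expressed the same field arithmetic that the shared gpiolib helpers now
perform. A standalone sketch of the read-modify-write on a 2-bit or
4-bit per-pin CON word (pure arithmetic, no register access):

#include <stdio.h>

/* Set pin `pin` of an n-bits-per-pin CON word to function `fn`
 * (0 = input, 1 = output, higher values = special functions). */
static unsigned int con_set(unsigned int con, unsigned int pin,
			    unsigned int bits, unsigned int fn)
{
	unsigned int shift = pin * bits;
	unsigned int mask = ((1u << bits) - 1) << shift;

	return (con & ~mask) | ((fn << shift) & mask);
}

int main(void)
{
	printf("2-bit: %#x\n", con_set(0, 3, 2, 1));	/* GPN-style: 0x40 */
	printf("4-bit: %#x\n", con_set(0, 3, 4, 1));	/* GPA-style: 0x1000 */
	return 0;
}
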
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 87c3f03..e980275 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -117,6 +117,7 @@
 
 static struct i2c_board_info smdk6440_i2c_devs0[] __initdata = {
 	{ I2C_BOARD_INFO("24c08", 0x50), },
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 };
 
 static struct i2c_board_info smdk6440_i2c_devs1[] __initdata = {
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index d609f5a..b78f562 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -135,6 +135,7 @@
 };
 
 static struct i2c_board_info smdk6450_i2c_devs0[] __initdata = {
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 	{ I2C_BOARD_INFO("24c08", 0x50), },	/* Samsung KS24C080C EEPROM */
 };
 
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 2d4a761..0305e9b 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -396,7 +396,7 @@
  * recommended to keep the following clocks disabled until the driver
  * requests them to be enabled.
  */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "cssys",
 		.id		= -1,
@@ -1381,8 +1381,6 @@
 
 void __init s5pc100_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
 	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
@@ -1393,16 +1391,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
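
This hunk swaps the hand-rolled register-and-gate loop for the common
s3c_register_clocks()/s3c_disable_clocks() pair. A simplified sketch of
what the disable pass amounts to, using a stand-in struct clk (the real
helpers presumably also report registration failures):

#include <stdio.h>

/* Simplified stand-in for the kernel's struct clk. */
struct clk {
	const char *name;
	int (*enable)(struct clk *clk, int on);
};

static int fake_enable(struct clk *clk, int on)
{
	printf("%-8s -> %s\n", clk->name, on ? "on" : "off");
	return 0;
}

/* What s3c_disable_clocks() boils down to: walk the table and gate
 * every clock off, so drivers re-enable them on demand later. */
static void disable_clocks(struct clk *clkp, int nr_clks)
{
	for (; nr_clks > 0; nr_clks--, clkp++)
		(clkp->enable)(clkp, 0);
}

int main(void)
{
	struct clk init_clocks_off[] = {
		{ "cssys", fake_enable },
		{ "secss", fake_enable },
	};

	disable_clocks(init_clocks_off, 2);
	return 0;
}
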
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 32e9cab..328467b 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -55,6 +55,8 @@
 #define S5PC100_VA_VIC_OFFSET	0x10000
 #define S5PC1XX_VA_VIC(x)	(S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
 
+#define S5PC100_PA_SROMC	(0xE7000000)
+#define S5P_PA_SROMC		S5PC100_PA_SROMC
 
 #define S5PC100_PA_ONENAND	(0xE7100000)
 
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 862f239..53aabef 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -118,6 +118,7 @@
 config MACH_SMDKV210
 	bool "SMDKV210"
 	select CPU_S5PV210
+	select S3C_DEV_FB
 	select S3C_DEV_HSMMC
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_HSMMC2
@@ -130,6 +131,7 @@
 	select SAMSUNG_DEV_IDE
 	select SAMSUNG_DEV_KEYPAD
 	select SAMSUNG_DEV_TS
+	select S5PV210_SETUP_FB_24BPP
 	select S5PV210_SETUP_I2C1
 	select S5PV210_SETUP_I2C2
 	select S5PV210_SETUP_IDE
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index b774ff1..2d59949 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -309,7 +309,7 @@
 	.get_rate	= s5pv210_clk_fout_apll_get_rate,
 };
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "pdma",
 		.id		= 0,
@@ -525,6 +525,12 @@
 		.parent		= &clk_pclk_psys.clk,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 20),
+	}, {
+		.name		= "sromc",
+		.id		= -1,
+		.parent		= &clk_hclk_psys.clk,
+		.enable		= s5pv210_clk_ip1_ctrl,
+		.ctrlbit	= (1 << 26),
 	},
 };
 
@@ -1220,13 +1226,9 @@
 
 void __init s5pv210_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
-	ret = s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-	if (ret > 0)
-		printk(KERN_ERR "Failed to register %u clocks\n", ret);
+	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
 
 	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
 		s3c_register_clksrc(sysclks[ptr], 1);
@@ -1234,15 +1236,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
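
The new "sromc" entry shows the table-driven gating scheme: each clock
names an enable callback plus the single bit it owns in a shared gate
register. A sketch of that bit manipulation (the register layout is
simplified; bit 26 is taken from the entry above):

#include <stdio.h>

/* Gate one peripheral clock by flipping its control bit in a shared
 * clock-gate register, leaving all other bits untouched. */
static unsigned int clk_ctrl(unsigned int reg, unsigned int ctrlbit, int on)
{
	return on ? (reg | ctrlbit) : (reg & ~ctrlbit);
}

int main(void)
{
	unsigned int ip1 = 0;

	ip1 = clk_ctrl(ip1, 1u << 26, 1);	/* enable "sromc" */
	printf("%#x\n", ip1);
	ip1 = clk_ctrl(ip1, 1u << 26, 0);	/* gate it off again */
	printf("%#x\n", ip1);
	return 0;
}
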
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index 8eb480e..61e6c24 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -81,11 +81,6 @@
 		.length		= SZ_512K,
 		.type		= MT_DEVICE,
 	}, {
-		.virtual	= (unsigned long)S5P_VA_SROMC,
-		.pfn		= __phys_to_pfn(S5PV210_PA_SROMC),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
 		.virtual	= (unsigned long)S5P_VA_DMC0,
 		.pfn		= __phys_to_pfn(S5PV210_PA_DMC0),
 		.length		= SZ_4K,
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h
index 119b95f..26710b3 100644
--- a/arch/arm/mach-s5pv210/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv210/include/mach/irqs.h
@@ -65,7 +65,7 @@
 #define IRQ_HSMMC0		S5P_IRQ_VIC1(26)
 #define IRQ_HSMMC1		S5P_IRQ_VIC1(27)
 #define IRQ_HSMMC2		S5P_IRQ_VIC1(28)
-#define IRQ_MIPICSI		S5P_IRQ_VIC1(29)
+#define IRQ_MIPI_CSIS		S5P_IRQ_VIC1(29)
 #define IRQ_MIPIDSI		S5P_IRQ_VIC1(30)
 #define IRQ_ONENAND_AUDI	S5P_IRQ_VIC1(31)
 
@@ -132,5 +132,6 @@
 #define IRQ_LCD_FIFO		IRQ_LCD0
 #define IRQ_LCD_VSYNC		IRQ_LCD1
 #define IRQ_LCD_SYSTEM		IRQ_LCD2
+#define IRQ_MIPI_CSIS0		IRQ_MIPI_CSIS
 
 #endif /* ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index 861d7fe..3611492 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -16,6 +16,8 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
+#define S5PV210_PA_SROM_BANK5	(0xA8000000)
+
 #define S5PC110_PA_ONENAND	(0xB0000000)
 #define S5P_PA_ONENAND		S5PC110_PA_ONENAND
 
@@ -60,6 +62,7 @@
 #define S3C_VA_UARTx(x)		(S3C_VA_UART + ((x) * S3C_UART_OFFSET))
 
 #define S5PV210_PA_SROMC	(0xE8000000)
+#define S5P_PA_SROMC		S5PV210_PA_SROMC
 
 #define S5PV210_PA_CFCON	(0xE8200000)
 
@@ -107,6 +110,8 @@
 #define S5PV210_PA_DMC0		(0xF0000000)
 #define S5PV210_PA_DMC1		(0xF1400000)
 
+#define S5PV210_PA_MIPI_CSIS	0xFA600000
+
 /* compatibility defines. */
 #define S3C_PA_UART		S5PV210_PA_UART
 #define S3C_PA_HSMMC0		S5PV210_PA_HSMMC(0)
@@ -123,6 +128,7 @@
 #define S5P_PA_FIMC0		S5PV210_PA_FIMC0
 #define S5P_PA_FIMC1		S5PV210_PA_FIMC1
 #define S5P_PA_FIMC2		S5PV210_PA_FIMC2
+#define S5P_PA_MIPI_CSIS0	S5PV210_PA_MIPI_CSIS
 
 #define SAMSUNG_PA_ADC		S5PV210_PA_ADC
 #define SAMSUNG_PA_CFCON	S5PV210_PA_CFCON
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-clock.h b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
index ebaabe0..4c45b74 100644
--- a/arch/arm/mach-s5pv210/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
@@ -161,7 +161,7 @@
 #define S5P_MDNIE_SEL		S5P_CLKREG(0x7008)
 #define S5P_MIPI_PHY_CON0	S5P_CLKREG(0x7200)
 #define S5P_MIPI_PHY_CON1	S5P_CLKREG(0x7204)
-#define S5P_MIPI_CONTROL	S5P_CLKREG(0xE814)
+#define S5P_MIPI_DPHY_CONTROL	S5P_CLKREG(0xE814)
 
 #define S5P_IDLE_CFG_TL_MASK	(3 << 30)
 #define S5P_IDLE_CFG_TM_MASK	(3 << 28)
@@ -195,9 +195,6 @@
 #define S5P_OTHERS_RET_UART		(1 << 28)
 #define S5P_OTHERS_USB_SIG_MASK		(1 << 16)
 
-/* MIPI */
-#define S5P_MIPI_DPHY_EN		(3)
-
 /* S5P_DAC_CONTROL */
 #define S5P_DAC_ENABLE			(1)
 #define S5P_DAC_DISABLE			(0)
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index 5dd1681..bb20a14 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -94,6 +94,7 @@
 
 static struct i2c_board_info smdkc110_i2c_devs0[] __initdata = {
 	{ I2C_BOARD_INFO("24c08", 0x50), },     /* Samsung S524AD0XD1 */
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 };
 
 static struct i2c_board_info smdkc110_i2c_devs1[] __initdata = {
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index 1fbc45b..88e4522 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -14,16 +14,25 @@
 #include <linux/init.h>
 #include <linux/serial_core.h>
 #include <linux/sysdev.h>
+#include <linux/dm9000.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
 
+#include <video/platform_lcd.h>
+
 #include <mach/map.h>
 #include <mach/regs-clock.h>
+#include <mach/regs-fb.h>
 
 #include <plat/regs-serial.h>
+#include <plat/regs-srom.h>
+#include <plat/gpio-cfg.h>
 #include <plat/s5pv210.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
@@ -33,6 +42,7 @@
 #include <plat/iic.h>
 #include <plat/keypad.h>
 #include <plat/pm.h>
+#include <plat/fb.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKV210_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -102,12 +112,106 @@
 	.cols		= 8,
 };
 
+static struct resource smdkv210_dm9000_resources[] = {
+	[0] = {
+		.start	= S5PV210_PA_SROM_BANK5,
+		.end	= S5PV210_PA_SROM_BANK5,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= S5PV210_PA_SROM_BANK5 + 2,
+		.end	= S5PV210_PA_SROM_BANK5 + 2,
+		.flags	= IORESOURCE_MEM,
+	},
+	[2] = {
+		.start	= IRQ_EINT(9),
+		.end	= IRQ_EINT(9),
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+	},
+};
+
+static struct dm9000_plat_data smdkv210_dm9000_platdata = {
+	.flags		= DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
+	.dev_addr	= { 0x00, 0x09, 0xc0, 0xff, 0xec, 0x48 },
+};
+
+struct platform_device smdkv210_dm9000 = {
+	.name		= "dm9000",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smdkv210_dm9000_resources),
+	.resource	= smdkv210_dm9000_resources,
+	.dev		= {
+		.platform_data	= &smdkv210_dm9000_platdata,
+	},
+};
+
+static void smdkv210_lte480wv_set_power(struct plat_lcd_data *pd,
+					unsigned int power)
+{
+	if (power) {
+#if !defined(CONFIG_BACKLIGHT_PWM)
+		gpio_request(S5PV210_GPD0(3), "GPD0");
+		gpio_direction_output(S5PV210_GPD0(3), 1);
+		gpio_free(S5PV210_GPD0(3));
+#endif
+
+		/* fire nRESET on power up */
+		gpio_request(S5PV210_GPH0(6), "GPH0");
+
+		gpio_direction_output(S5PV210_GPH0(6), 1);
+
+		gpio_set_value(S5PV210_GPH0(6), 0);
+		mdelay(10);
+
+		gpio_set_value(S5PV210_GPH0(6), 1);
+		mdelay(10);
+
+		gpio_free(S5PV210_GPH0(6));
+	} else {
+#if !defined(CONFIG_BACKLIGHT_PWM)
+		gpio_request(S5PV210_GPD0(3), "GPD0");
+		gpio_direction_output(S5PV210_GPD0(3), 0);
+		gpio_free(S5PV210_GPD0(3));
+#endif
+	}
+}
+
+static struct plat_lcd_data smdkv210_lcd_lte480wv_data = {
+	.set_power	= smdkv210_lte480wv_set_power,
+};
+
+static struct platform_device smdkv210_lcd_lte480wv = {
+	.name			= "platform-lcd",
+	.dev.parent		= &s3c_device_fb.dev,
+	.dev.platform_data	= &smdkv210_lcd_lte480wv_data,
+};
+
+static struct s3c_fb_pd_win smdkv210_fb_win0 = {
+	.win_mode = {
+		.left_margin	= 13,
+		.right_margin	= 8,
+		.upper_margin	= 7,
+		.lower_margin	= 5,
+		.hsync_len	= 3,
+		.vsync_len	= 1,
+		.xres		= 800,
+		.yres		= 480,
+	},
+	.max_bpp	= 32,
+	.default_bpp	= 24,
+};
+
+static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
+	.win[0]		= &smdkv210_fb_win0,
+	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+	.setup_gpio	= s5pv210_fb_gpio_setup_24bpp,
+};
+
 static struct platform_device *smdkv210_devices[] __initdata = {
-	&s5pv210_device_iis0,
-	&s5pv210_device_ac97,
-	&s5pv210_device_spdif,
 	&s3c_device_adc,
 	&s3c_device_cfcon,
+	&s3c_device_fb,
 	&s3c_device_hsmmc0,
 	&s3c_device_hsmmc1,
 	&s3c_device_hsmmc2,
@@ -115,14 +219,37 @@
 	&s3c_device_i2c0,
 	&s3c_device_i2c1,
 	&s3c_device_i2c2,
-	&samsung_device_keypad,
 	&s3c_device_rtc,
 	&s3c_device_ts,
 	&s3c_device_wdt,
+	&s5pv210_device_ac97,
+	&s5pv210_device_iis0,
+	&s5pv210_device_spdif,
+	&samsung_device_keypad,
+	&smdkv210_dm9000,
+	&smdkv210_lcd_lte480wv,
 };
 
+static void __init smdkv210_dm9000_init(void)
+{
+	unsigned int tmp;
+
+	gpio_request(S5PV210_MP01(5), "nCS5");
+	s3c_gpio_cfgpin(S5PV210_MP01(5), S3C_GPIO_SFN(2));
+	gpio_free(S5PV210_MP01(5));
+
+	tmp = (5 << S5P_SROM_BCX__TACC__SHIFT);
+	__raw_writel(tmp, S5P_SROM_BC5);
+
+	tmp = __raw_readl(S5P_SROM_BW);
+	tmp &= ~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS5__SHIFT);
+	tmp |= (1 << S5P_SROM_BW__NCS5__SHIFT);
+	__raw_writel(tmp, S5P_SROM_BW);
+}
+
 static struct i2c_board_info smdkv210_i2c_devs0[] __initdata = {
 	{ I2C_BOARD_INFO("24c08", 0x50), },     /* Samsung S524AD0XD1 */
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 };
 
 static struct i2c_board_info smdkv210_i2c_devs1[] __initdata = {
@@ -150,6 +277,8 @@
 {
 	s3c_pm_init();
 
+	smdkv210_dm9000_init();
+
 	samsung_keypad_set_platdata(&smdkv210_keypad_data);
 	s3c24xx_ts_set_platdata(&s3c_ts_platform);
 
@@ -165,6 +294,8 @@
 
 	s3c_ide_set_platdata(&smdkv210_ide_pdata);
 
+	s3c_fb_set_platdata(&smdkv210_lcd0_pdata);
+
 	platform_add_devices(smdkv210_devices, ARRAY_SIZE(smdkv210_devices));
 }
 
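
smdkv210_dm9000_init() above programs SROM bank 5 for the DM9000 with a
read-modify-write on the bus-width register: clear only bank 5's field,
then set its 16-bit data-width bit, leaving the other banks alone. A
standalone sketch with hypothetical stand-ins for the S5P_SROM_BW__*
shift and mask constants:

#include <stdio.h>

#define NCS5_SHIFT	20	/* hypothetical field position for bank 5 */
#define CS_MASK		0xf	/* hypothetical per-bank field width */

static unsigned int srom_bank5_16bit(unsigned int bw)
{
	bw &= ~(CS_MASK << NCS5_SHIFT);	/* clear only bank 5's field */
	bw |= (1 << NCS5_SHIFT);	/* data-width bit: 16-bit bus */
	return bw;
}

int main(void)
{
	/* All-ones input shows that only bank 5's field is rewritten. */
	printf("%#x\n", srom_bank5_16bit(0xffffffff));	/* 0xff1fffff */
	return 0;
}
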
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig
index d64efe0..09c4c21 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-s5pv310/Kconfig
@@ -15,6 +15,11 @@
 	help
 	  Enable S5PV310 CPU support
 
+config S5PV310_DEV_PD
+	bool
+	help
+	  Compile in platform device definitions for Power Domain
+
 config S5PV310_SETUP_I2C1
 	bool
 	help
@@ -61,6 +66,11 @@
 	help
 	  Common setup code for SDHCI gpio.
 
+config S5PV310_DEV_SYSMMU
+	bool
+	help
+	  Common setup code for SYSTEM MMU in S5PV310
+
 # machine support
 
 menu "S5PC210 Machines"
@@ -70,11 +80,15 @@
 	select CPU_S5PV310
 	select S3C_DEV_RTC
 	select S3C_DEV_WDT
+	select S3C_DEV_I2C1
 	select S3C_DEV_HSMMC
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_HSMMC2
 	select S3C_DEV_HSMMC3
+	select S5PV310_DEV_PD
+	select S5PV310_SETUP_I2C1
 	select S5PV310_SETUP_SDHCI
+	select S5PV310_DEV_SYSMMU
 	help
 	  Machine support for Samsung SMDKC210
 	  S5PC210(MCP) is one of package option of S5PV310
@@ -83,6 +97,10 @@
 	bool "Mobile UNIVERSAL_C210 Board"
 	select CPU_S5PV310
 	select S5P_DEV_ONENAND
+	select S3C_DEV_HSMMC
+	select S3C_DEV_HSMMC2
+	select S3C_DEV_HSMMC3
+	select S5PV310_SETUP_SDHCI
 	select S3C_DEV_I2C1
 	select S5PV310_SETUP_I2C1
 	help
@@ -98,10 +116,13 @@
 	select CPU_S5PV310
 	select S3C_DEV_RTC
 	select S3C_DEV_WDT
+	select S3C_DEV_I2C1
 	select S3C_DEV_HSMMC
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_HSMMC2
 	select S3C_DEV_HSMMC3
+	select S5PV310_DEV_PD
+	select S5PV310_SETUP_I2C1
 	select S5PV310_SETUP_SDHCI
 	help
 	  Machine support for Samsung SMDKV310
diff --git a/arch/arm/mach-s5pv310/Makefile b/arch/arm/mach-s5pv310/Makefile
index 61e3cb6..036fb38 100644
--- a/arch/arm/mach-s5pv310/Makefile
+++ b/arch/arm/mach-s5pv310/Makefile
@@ -14,6 +14,7 @@
 
 obj-$(CONFIG_CPU_S5PV310)	+= cpu.o init.o clock.o irq-combiner.o
 obj-$(CONFIG_CPU_S5PV310)	+= setup-i2c0.o time.o gpiolib.o irq-eint.o dma.o
+obj-$(CONFIG_CPU_FREQ)		+= cpufreq.o
 
 obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)	+= localtimer.o
@@ -27,7 +28,10 @@
 
 # device support
 
-obj-y += dev-audio.o
+obj-y					+= dev-audio.o
+obj-$(CONFIG_S5PV310_DEV_PD)		+= dev-pd.o
+obj-$(CONFIG_S5PV310_DEV_SYSMMU)	+= dev-sysmmu.o
+
 obj-$(CONFIG_S5PV310_SETUP_I2C1)	+= setup-i2c1.o
 obj-$(CONFIG_S5PV310_SETUP_I2C2)	+= setup-i2c2.o
 obj-$(CONFIG_S5PV310_SETUP_I2C3)	+= setup-i2c3.o
diff --git a/arch/arm/mach-s5pv310/clock.c b/arch/arm/mach-s5pv310/clock.c
index 58c9d33..fc7c2f8 100644
--- a/arch/arm/mach-s5pv310/clock.c
+++ b/arch/arm/mach-s5pv310/clock.c
@@ -244,7 +244,7 @@
 		.id		= -1,
 	},
 	.sources	= &clkset_mout_corebus,
-	.reg_src	= { .reg = S5P_CLKSRC_CORE, .shift = 4, .size = 1 },
+	.reg_src	= { .reg = S5P_CLKSRC_DMC, .shift = 4, .size = 1 },
 };
 
 static struct clksrc_clk clk_sclk_dmc = {
@@ -253,7 +253,7 @@
 		.id		= -1,
 		.parent		= &clk_mout_corebus.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 12, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 12, .size = 3 },
 };
 
 static struct clksrc_clk clk_aclk_cored = {
@@ -262,7 +262,7 @@
 		.id		= -1,
 		.parent		= &clk_sclk_dmc.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 16, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 16, .size = 3 },
 };
 
 static struct clksrc_clk clk_aclk_corep = {
@@ -271,7 +271,7 @@
 		.id		= -1,
 		.parent		= &clk_aclk_cored.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 20, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 20, .size = 3 },
 };
 
 static struct clksrc_clk clk_aclk_acp = {
@@ -280,7 +280,7 @@
 		.id		= -1,
 		.parent		= &clk_mout_corebus.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 0, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 0, .size = 3 },
 };
 
 static struct clksrc_clk clk_pclk_acp = {
@@ -289,7 +289,7 @@
 		.id		= -1,
 		.parent		= &clk_aclk_acp.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 4, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 4, .size = 3 },
 };
 
 /* Core list of CMU_TOP side */
@@ -384,7 +384,7 @@
 	.reg_src	= { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 },
 };
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "timers",
 		.id		= -1,
@@ -467,6 +467,16 @@
 		.enable		= s5pv310_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 10),
 	}, {
+		.name		= "pdma",
+		.id		= 0,
+		.enable		= s5pv310_clk_ip_fsys_ctrl,
+		.ctrlbit	= (1 << 0),
+	}, {
+		.name		= "pdma",
+		.id		= 1,
+		.enable		= s5pv310_clk_ip_fsys_ctrl,
+		.ctrlbit	= (1 << 1),
+	}, {
 		.name		= "adc",
 		.id		= -1,
 		.enable		= s5pv310_clk_ip_peril_ctrl,
@@ -507,6 +517,26 @@
 		.enable		= s5pv310_clk_ip_peril_ctrl,
 		.ctrlbit	= (1 << 18),
 	}, {
+		.name		= "iis",
+		.id		= 0,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 19),
+	}, {
+		.name		= "iis",
+		.id		= 1,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 20),
+	}, {
+		.name		= "iis",
+		.id		= 2,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 21),
+	}, {
+		.name		= "ac97",
+		.id		= -1,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 27),
+	}, {
 		.name		= "fimg2d",
 		.id		= -1,
 		.enable		= s5pv310_clk_ip_image_ctrl,
@@ -990,6 +1020,17 @@
 	&clk_dout_mmc4,
 };
 
+static int xtal_rate;
+
+static unsigned long s5pv310_fout_apll_get_rate(struct clk *clk)
+{
+	return s5p_get_pll45xx(xtal_rate, __raw_readl(S5P_APLL_CON0), pll_4508);
+}
+
+static struct clk_ops s5pv310_fout_apll_ops = {
+	.get_rate = s5pv310_fout_apll_get_rate,
+};
+
 void __init_or_cpufreq s5pv310_setup_clocks(void)
 {
 	struct clk *xtal_clk;
@@ -1013,6 +1054,9 @@
 	BUG_ON(IS_ERR(xtal_clk));
 
 	xtal = clk_get_rate(xtal_clk);
+
+	xtal_rate = xtal;
+
 	clk_put(xtal_clk);
 
 	printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
@@ -1026,7 +1070,7 @@
 	vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
 				__raw_readl(S5P_VPLL_CON1), pll_4650);
 
-	clk_fout_apll.rate = apll;
+	clk_fout_apll.ops = &s5pv310_fout_apll_ops;
 	clk_fout_mpll.rate = mpll;
 	clk_fout_epll.rate = epll;
 	clk_fout_vpll.rate = vpll;
@@ -1061,13 +1105,9 @@
 
 void __init s5pv310_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
-	ret = s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-	if (ret > 0)
-		printk(KERN_ERR "Failed to register %u clocks\n", ret);
+	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
 
 	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
 		s3c_register_clksrc(sysclks[ptr], 1);
@@ -1075,15 +1115,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
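
Instead of caching the APLL rate once at boot, the clock now gets a
get_rate hook so the rate is recomputed from the PLL registers whenever
queried, which matters once cpufreq starts reprogramming the APLL. A
minimal sketch of the ops-override pattern with simplified stand-in
types (the constant replaces the real s5p_get_pll45xx() readback):

#include <stdio.h>

struct clk;

struct clk_ops {
	unsigned long (*get_rate)(struct clk *clk);
};

struct clk {
	const char *name;
	struct clk_ops *ops;
};

/* Recompute the rate on every query instead of returning a value
 * cached at boot. */
static unsigned long fout_apll_get_rate(struct clk *clk)
{
	(void)clk;
	return 800000000UL;	/* stand-in for the PLL register readback */
}

static struct clk_ops fout_apll_ops = {
	.get_rate = fout_apll_get_rate,
};

int main(void)
{
	struct clk fout_apll = { "fout_apll", &fout_apll_ops };

	printf("%s: %lu Hz\n", fout_apll.name,
	       fout_apll.ops->get_rate(&fout_apll));
	return 0;
}
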
diff --git a/arch/arm/mach-s5pv310/cpu.c b/arch/arm/mach-s5pv310/cpu.c
index 72ab289..0db0fb6 100644
--- a/arch/arm/mach-s5pv310/cpu.c
+++ b/arch/arm/mach-s5pv310/cpu.c
@@ -41,6 +41,11 @@
 		.length		= SZ_128K,
 		.type		= MT_DEVICE,
 	}, {
+		.virtual	= (unsigned long)S5P_VA_PMU,
+		.pfn		= __phys_to_pfn(S5PV310_PA_PMU),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
 		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
 		.pfn		= __phys_to_pfn(S5PV310_PA_COMBINER),
 		.length		= SZ_4K,
@@ -71,6 +76,11 @@
 		.length		= SZ_256,
 		.type		= MT_DEVICE,
 	}, {
+		.virtual	= (unsigned long)S5P_VA_DMC0,
+		.pfn		= __phys_to_pfn(S5PV310_PA_DMC0),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
 		.virtual	= (unsigned long)S3C_VA_UART,
 		.pfn		= __phys_to_pfn(S3C_PA_UART),
 		.length		= SZ_512K,
@@ -123,6 +133,15 @@
 	gic_init(0, IRQ_LOCALTIMER, S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
 
 	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
+
+		/*
+		 * Only SPI(0) to SPI(39), plus SPI(51) and SPI(53), are
+		 * connected to the interrupt combiner; only those IRQs
+		 * need cascade initialization, so skip the rest.
+		 */
+		if ((irq >= 40) && !(irq == 51) && !(irq == 53))
+			continue;
+
 		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
 				COMBINER_IRQ(irq, 0));
 		combiner_cascade_irq(irq, IRQ_SPI(irq));
@@ -164,7 +183,7 @@
 	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
 		     S5P_VA_L2CC + L2X0_POWER_CTRL);
 
-	l2x0_init(S5P_VA_L2CC, 0x7C070001, 0xC200ffff);
+	l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
 
 	return 0;
 }
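
The cascade setup above now skips combiner inputs that are not wired
up. The skip predicate, inverted into a small testable function:

#include <stdio.h>

/* Only SPI(0)..SPI(39) plus SPI(51) and SPI(53) reach the interrupt
 * combiner; every other line is skipped during cascade setup. */
static int combiner_connected(int irq)
{
	return irq < 40 || irq == 51 || irq == 53;
}

int main(void)
{
	int irq;

	for (irq = 38; irq <= 54; irq++)
		if (combiner_connected(irq))
			printf("init combiner for SPI(%d)\n", irq);
	return 0;
}
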
diff --git a/arch/arm/mach-s5pv310/cpufreq.c b/arch/arm/mach-s5pv310/cpufreq.c
new file mode 100644
index 0000000..b04cbc7
--- /dev/null
+++ b/arch/arm/mach-s5pv310/cpufreq.c
@@ -0,0 +1,580 @@
+/* linux/arch/arm/mach-s5pv310/cpufreq.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/cpufreq.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-mem.h>
+
+#include <plat/clock.h>
+#include <plat/pm.h>
+
+static struct clk *cpu_clk;
+static struct clk *moutcore;
+static struct clk *mout_mpll;
+static struct clk *mout_apll;
+
+#ifdef CONFIG_REGULATOR
+static struct regulator *arm_regulator;
+static struct regulator *int_regulator;
+#endif
+
+static struct cpufreq_freqs freqs;
+static unsigned int memtype;
+
+enum s5pv310_memory_type {
+	DDR2 = 4,
+	LPDDR2,
+	DDR3,
+};
+
+enum cpufreq_level_index {
+	L0, L1, L2, L3, CPUFREQ_LEVEL_END,
+};
+
+static struct cpufreq_frequency_table s5pv310_freq_table[] = {
+	{L0, 1000*1000},
+	{L1, 800*1000},
+	{L2, 400*1000},
+	{L3, 100*1000},
+	{0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
+	 *		DIVATB, DIVPCLK_DBG, DIVAPLL }
+	 */
+
+	/* ARM L0: 1000MHz */
+	{ 0, 3, 7, 3, 3, 0, 1 },
+
+	/* ARM L1: 800MHz */
+	{ 0, 3, 7, 3, 3, 0, 1 },
+
+	/* ARM L2: 400MHz */
+	{ 0, 1, 3, 1, 3, 0, 1 },
+
+	/* ARM L3: 100MHz */
+	{ 0, 0, 1, 0, 3, 1, 1 },
+};
+
+static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVCOPY, DIVHPM }
+	 */
+
+	 /* ARM L0: 1000MHz */
+	{ 3, 0 },
+
+	/* ARM L1: 800MHz */
+	{ 3, 0 },
+
+	/* ARM L2: 400MHz */
+	{ 3, 0 },
+
+	/* ARM L3: 100MHz */
+	{ 3, 0 },
+};
+
+static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+	 *		DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+	 */
+
+	/* DMC L0: 400MHz */
+	{ 3, 1, 1, 1, 1, 1, 3, 1 },
+
+	/* DMC L1: 400MHz */
+	{ 3, 1, 1, 1, 1, 1, 3, 1 },
+
+	/* DMC L2: 266.7MHz */
+	{ 7, 1, 1, 2, 1, 1, 3, 1 },
+
+	/* DMC L3: 200MHz */
+	{ 7, 1, 1, 3, 1, 1, 3, 1 },
+};
+
+static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+	 */
+
+	/* ACLK200 L0: 200MHz */
+	{ 3, 7, 4, 5, 1 },
+
+	/* ACLK200 L1: 200MHz */
+	{ 3, 7, 4, 5, 1 },
+
+	/* ACLK200 L2: 160MHz */
+	{ 4, 7, 5, 7, 1 },
+
+	/* ACLK200 L3: 133.3MHz */
+	{ 5, 7, 7, 7, 1 },
+};
+
+static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVGDL/R, DIVGPL/R }
+	 */
+
+	/* ACLK_GDL/R L0: 200MHz */
+	{ 3, 1 },
+
+	/* ACLK_GDL/R L1: 200MHz */
+	{ 3, 1 },
+
+	/* ACLK_GDL/R L2: 160MHz */
+	{ 4, 1 },
+
+	/* ACLK_GDL/R L3: 133.3MHz */
+	{ 5, 1 },
+};
+
+struct cpufreq_voltage_table {
+	unsigned int	index;		/* any */
+	unsigned int	arm_volt;	/* uV */
+	unsigned int	int_volt;
+};
+
+static struct cpufreq_voltage_table s5pv310_volt_table[CPUFREQ_LEVEL_END] = {
+	{
+		.index		= L0,
+		.arm_volt	= 1200000,
+		.int_volt	= 1100000,
+	}, {
+		.index		= L1,
+		.arm_volt	= 1100000,
+		.int_volt	= 1100000,
+	}, {
+		.index		= L2,
+		.arm_volt	= 1000000,
+		.int_volt	= 1000000,
+	}, {
+		.index		= L3,
+		.arm_volt	= 900000,
+		.int_volt	= 1000000,
+	},
+};
+
+static unsigned int s5pv310_apll_pms_table[CPUFREQ_LEVEL_END] = {
+	/* APLL FOUT L0: 1000MHz */
+	((250 << 16) | (6 << 8) | 1),
+
+	/* APLL FOUT L1: 800MHz */
+	((200 << 16) | (6 << 8) | 1),
+
+	/* APLL FOUT L2 : 400MHz */
+	((200 << 16) | (6 << 8) | 2),
+
+	/* APLL FOUT L3: 100MHz */
+	((200 << 16) | (6 << 8) | 4),
+};
+
+int s5pv310_verify_speed(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, s5pv310_freq_table);
+}
+
+unsigned int s5pv310_getspeed(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk) / 1000;
+}
+
+void s5pv310_set_clkdiv(unsigned int div_index)
+{
+	unsigned int tmp;
+
+	/* Change Divider - CPU0 */
+
+	tmp = __raw_readl(S5P_CLKDIV_CPU);
+
+	tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
+		S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
+		S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
+		S5P_CLKDIV_CPU0_APLL_MASK);
+
+	tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
+		(clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
+		(clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
+		(clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
+		(clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
+		(clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
+		(clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_CPU);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STATCPU);
+	} while (tmp & 0x1111111);
+
+	/* Change Divider - CPU1 */
+
+	tmp = __raw_readl(S5P_CLKDIV_CPU1);
+
+	tmp &= ~((0x7 << 4) | 0x7);
+
+	tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
+		(clkdiv_cpu1[div_index][1] << 0));
+
+	__raw_writel(tmp, S5P_CLKDIV_CPU1);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
+	} while (tmp & 0x11);
+
+	/* Change Divider - DMC0 */
+
+	tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+	tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+		S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
+		S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
+		S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
+
+	tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
+		(clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+		(clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+		(clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
+		(clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+		(clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+		(clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+		(clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+	} while (tmp & 0x11111111);
+
+	/* Change Divider - TOP */
+
+	tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+	tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
+		S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
+		S5P_CLKDIV_TOP_ONENAND_MASK);
+
+	tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+		(clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+		(clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+		(clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+		(clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_TOP);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+	} while (tmp & 0x11111);
+
+	/* Change Divider - LEFTBUS */
+
+	tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+	} while (tmp & 0x11);
+
+	/* Change Divider - RIGHTBUS */
+
+	tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+	} while (tmp & 0x11);
+}
+
+static void s5pv310_set_apll(unsigned int index)
+{
+	unsigned int tmp;
+
+	/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+	clk_set_parent(moutcore, mout_mpll);
+
+	do {
+		tmp = (__raw_readl(S5P_CLKMUX_STATCPU)
+			>> S5P_CLKSRC_CPU_MUXCORE_SHIFT);
+		tmp &= 0x7;
+	} while (tmp != 0x2);
+
+	/* 2. Set APLL Lock time */
+	__raw_writel(S5P_APLL_LOCKTIME, S5P_APLL_LOCK);
+
+	/* 3. Change PLL PMS values */
+	tmp = __raw_readl(S5P_APLL_CON0);
+	tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
+	tmp |= s5pv310_apll_pms_table[index];
+	__raw_writel(tmp, S5P_APLL_CON0);
+
+	/* 4. wait_lock_time */
+	do {
+		tmp = __raw_readl(S5P_APLL_CON0);
+	} while (!(tmp & (0x1 << S5P_APLLCON0_LOCKED_SHIFT)));
+
+	/* 5. MUX_CORE_SEL = APLL */
+	clk_set_parent(moutcore, mout_apll);
+
+	do {
+		tmp = __raw_readl(S5P_CLKMUX_STATCPU);
+		tmp &= S5P_CLKMUX_STATCPU_MUXCORE_MASK;
+	} while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
+}
+
+static void s5pv310_set_frequency(unsigned int old_index, unsigned int new_index)
+{
+	unsigned int tmp;
+
+	if (old_index > new_index) {
+		/* Moving up to L0 requires reprogramming the APLL */
+		if (freqs.new == s5pv310_freq_table[L0].frequency) {
+			/* 1. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+
+			/* 2. Change the apll m,p,s value */
+			s5pv310_set_apll(new_index);
+		} else {
+			/* 1. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+
+			/* 2. Change just s value in apll m,p,s value */
+			tmp = __raw_readl(S5P_APLL_CON0);
+			tmp &= ~(0x7 << 0);
+			tmp |= (s5pv310_apll_pms_table[new_index] & 0x7);
+			__raw_writel(tmp, S5P_APLL_CON0);
+		}
+	}
+
+	else if (old_index < new_index) {
+		/* Leaving L0 requires reprogramming the APLL */
+		if (freqs.old == s5pv310_freq_table[L0].frequency) {
+			/* 1. Change the apll m,p,s value */
+			s5pv310_set_apll(new_index);
+
+			/* 2. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+		} else {
+			/* 1. Change just s value in apll m,p,s value */
+			tmp = __raw_readl(S5P_APLL_CON0);
+			tmp &= ~(0x7 << 0);
+			tmp |= (s5pv310_apll_pms_table[new_index] & 0x7);
+			__raw_writel(tmp, S5P_APLL_CON0);
+
+			/* 2. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+		}
+	}
+}
+
+static int s5pv310_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	unsigned int index, old_index;
+	unsigned int arm_volt, int_volt;
+
+	freqs.old = s5pv310_getspeed(policy->cpu);
+
+	if (cpufreq_frequency_table_target(policy, s5pv310_freq_table,
+					   freqs.old, relation, &old_index))
+		return -EINVAL;
+
+	if (cpufreq_frequency_table_target(policy, s5pv310_freq_table,
+					   target_freq, relation, &index))
+		return -EINVAL;
+
+	freqs.new = s5pv310_freq_table[index].frequency;
+	freqs.cpu = policy->cpu;
+
+	if (freqs.new == freqs.old)
+		return 0;
+
+	/* get the voltage value */
+	arm_volt = s5pv310_volt_table[index].arm_volt;
+	int_volt = s5pv310_volt_table[index].int_volt;
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	/* control regulator */
+	if (freqs.new > freqs.old) {
+		/* Voltage up */
+#ifdef CONFIG_REGULATOR
+		regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+		regulator_set_voltage(int_regulator, int_volt, int_volt);
+#endif
+	}
+
+	/* Clock Configuration Procedure */
+	s5pv310_set_frequency(old_index, index);
+
+	/* control regulator */
+	if (freqs.new < freqs.old) {
+		/* Voltage down */
+#ifdef CONFIG_REGULATOR
+		regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+		regulator_set_voltage(int_regulator, int_volt, int_volt);
+#endif
+	}
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy,
+				   pm_message_t pmsg)
+{
+	return 0;
+}
+
+static int s5pv310_cpufreq_resume(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+#endif
+
+static int s5pv310_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	policy->cur = policy->min = policy->max = s5pv310_getspeed(policy->cpu);
+
+	cpufreq_frequency_table_get_attr(s5pv310_freq_table, policy->cpu);
+
+	/* set the transition latency value */
+	policy->cpuinfo.transition_latency = 100000;
+
+	/*
+	 * The S5PV310 has two cores whose frequencies cannot be set
+	 * independently: both always run at the same speed, so every
+	 * transition affects all CPUs.
+	 */
+	cpumask_setall(policy->cpus);
+
+	return cpufreq_frequency_table_cpuinfo(policy, s5pv310_freq_table);
+}
+
+static struct cpufreq_driver s5pv310_driver = {
+	.flags		= CPUFREQ_STICKY,
+	.verify		= s5pv310_verify_speed,
+	.target		= s5pv310_target,
+	.get		= s5pv310_getspeed,
+	.init		= s5pv310_cpufreq_cpu_init,
+	.name		= "s5pv310_cpufreq",
+#ifdef CONFIG_PM
+	.suspend	= s5pv310_cpufreq_suspend,
+	.resume		= s5pv310_cpufreq_resume,
+#endif
+};
+
+static int __init s5pv310_cpufreq_init(void)
+{
+	cpu_clk = clk_get(NULL, "armclk");
+	if (IS_ERR(cpu_clk))
+		return PTR_ERR(cpu_clk);
+
+	moutcore = clk_get(NULL, "moutcore");
+	if (IS_ERR(moutcore))
+		goto out;
+
+	mout_mpll = clk_get(NULL, "mout_mpll");
+	if (IS_ERR(mout_mpll))
+		goto out;
+
+	mout_apll = clk_get(NULL, "mout_apll");
+	if (IS_ERR(mout_apll))
+		goto out;
+
+#ifdef CONFIG_REGULATOR
+	arm_regulator = regulator_get(NULL, "vdd_arm");
+	if (IS_ERR(arm_regulator)) {
+		printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
+		goto out;
+	}
+
+	int_regulator = regulator_get(NULL, "vdd_int");
+	if (IS_ERR(int_regulator)) {
+		printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
+		goto out;
+	}
+#endif
+
+	/*
+	 * Check the DRAM type, since the available DVFS levels
+	 * depend on it.
+	 */
+	memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
+	memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
+	memtype &= S5P_DMC0_MEMTYPE_MASK;
+
+	if ((memtype < DDR2) || (memtype > DDR3)) {
+		printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
+		goto out;
+	} else {
+		printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
+	}
+
+	return cpufreq_register_driver(&s5pv310_driver);
+
+out:
+	if (!IS_ERR(cpu_clk))
+		clk_put(cpu_clk);
+
+	if (!IS_ERR(moutcore))
+		clk_put(moutcore);
+
+	if (!IS_ERR(mout_mpll))
+		clk_put(mout_mpll);
+
+	if (!IS_ERR(mout_apll))
+		clk_put(mout_apll);
+
+#ifdef CONFIG_REGULATOR
+	if (!IS_ERR(arm_regulator))
+		regulator_put(arm_regulator);
+
+	if (!IS_ERR(int_regulator))
+		regulator_put(int_regulator);
+#endif
+
+	printk(KERN_ERR "%s: failed initialization\n", __func__);
+
+	return -EINVAL;
+}
+late_initcall(s5pv310_cpufreq_init);
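
The ordering inside s5pv310_target() is the classic DVFS rule: raise
the voltage before speeding up, drop it only after slowing down, so the
core is never underpowered for its clock. A compact sketch of just that
sequencing:

#include <stdio.h>

static void dvfs_transition(unsigned int old_khz, unsigned int new_khz,
			    unsigned int new_uv)
{
	if (new_khz > old_khz)	/* voltage first when going faster */
		printf("set voltage %u uV\n", new_uv);

	printf("set frequency %u kHz\n", new_khz);

	if (new_khz < old_khz)	/* voltage last when going slower */
		printf("set voltage %u uV\n", new_uv);
}

int main(void)
{
	dvfs_transition(800000, 1000000, 1200000);	/* L1 -> L0 */
	dvfs_transition(1000000, 400000, 1000000);	/* L0 -> L2 */
	return 0;
}
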
diff --git a/arch/arm/mach-s5pv310/dev-pd.c b/arch/arm/mach-s5pv310/dev-pd.c
new file mode 100644
index 0000000..58a50c2
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-pd.c
@@ -0,0 +1,139 @@
+/* linux/arch/arm/mach-s5pv310/dev-pd.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - Power Domain support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <mach/regs-pmu.h>
+
+#include <plat/pd.h>
+
+static int s5pv310_pd_enable(struct device *dev)
+{
+	struct samsung_pd_info *pdata =  dev->platform_data;
+	u32 timeout;
+
+	__raw_writel(S5P_INT_LOCAL_PWR_EN, pdata->base);
+
+	/* Wait max 1ms */
+	timeout = 10;
+	while ((__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN)
+		!= S5P_INT_LOCAL_PWR_EN) {
+		if (timeout == 0) {
+			printk(KERN_ERR "Power domain %s enable failed.\n",
+				dev_name(dev));
+			return -ETIMEDOUT;
+		}
+		timeout--;
+		udelay(100);
+	}
+
+	return 0;
+}
+
+static int s5pv310_pd_disable(struct device *dev)
+{
+	struct samsung_pd_info *pdata =  dev->platform_data;
+	u32 timeout;
+
+	__raw_writel(0, pdata->base);
+
+	/* Wait max 1ms */
+	timeout = 10;
+	while (__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN) {
+		if (timeout == 0) {
+			printk(KERN_ERR "Power domain %s disable failed.\n",
+				dev_name(dev));
+			return -ETIMEDOUT;
+		}
+		timeout--;
+		udelay(100);
+	}
+
+	return 0;
+}
+
+struct platform_device s5pv310_device_pd[] = {
+	{
+		.name		= "samsung-pd",
+		.id		= 0,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_MFC_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 1,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_G3D_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 2,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_LCD0_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 3,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_LCD1_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 4,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_TV_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 5,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_CAM_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 6,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_GPS_CONF,
+			},
+		},
+	},
+};
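Each entry above registers one "samsung-pd" platform device per power
domain, carrying its PMU configuration register and the enable/disable
callbacks through an anonymous samsung_pd_info. A hypothetical sketch of
the consuming driver side (the probe function name is assumed, not part
of this patch):

	static int example_pd_probe(struct platform_device *pdev)
	{
		struct samsung_pd_info *pdata = pdev->dev.platform_data;

		if (!pdata || !pdata->enable)
			return -EINVAL;

		/* Powers the domain up; s5pv310_pd_enable() polls the PMU
		 * status register for up to 1ms before giving up. */
		return pdata->enable(&pdev->dev);
	}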
diff --git a/arch/arm/mach-s5pv310/dev-sysmmu.c b/arch/arm/mach-s5pv310/dev-sysmmu.c
new file mode 100644
index 0000000..e1bb200
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-sysmmu.c
@@ -0,0 +1,187 @@
+/* linux/arch/arm/mach-s5pv310/dev-sysmmu.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+static struct resource s5pv310_sysmmu_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_SYSMMU_MDMA,
+		.end	= S5PV310_PA_SYSMMU_MDMA + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_SYSMMU_MDMA0_0,
+		.end	= IRQ_SYSMMU_MDMA0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		.start	= S5PV310_PA_SYSMMU_SSS,
+		.end	= S5PV310_PA_SYSMMU_SSS + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[3] = {
+		.start	= IRQ_SYSMMU_SSS_0,
+		.end	= IRQ_SYSMMU_SSS_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[4] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC0,
+		.end	= S5PV310_PA_SYSMMU_FIMC0 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[5] = {
+		.start	= IRQ_SYSMMU_FIMC0_0,
+		.end	= IRQ_SYSMMU_FIMC0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[6] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC1,
+		.end	= S5PV310_PA_SYSMMU_FIMC1 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[7] = {
+		.start	= IRQ_SYSMMU_FIMC1_0,
+		.end	= IRQ_SYSMMU_FIMC1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[8] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC2,
+		.end	= S5PV310_PA_SYSMMU_FIMC2 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[9] = {
+		.start	= IRQ_SYSMMU_FIMC2_0,
+		.end	= IRQ_SYSMMU_FIMC2_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[10] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC3,
+		.end	= S5PV310_PA_SYSMMU_FIMC3 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[11] = {
+		.start	= IRQ_SYSMMU_FIMC3_0,
+		.end	= IRQ_SYSMMU_FIMC3_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[12] = {
+		.start	= S5PV310_PA_SYSMMU_JPEG,
+		.end	= S5PV310_PA_SYSMMU_JPEG + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[13] = {
+		.start	= IRQ_SYSMMU_JPEG_0,
+		.end	= IRQ_SYSMMU_JPEG_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[14] = {
+		.start	= S5PV310_PA_SYSMMU_FIMD0,
+		.end	= S5PV310_PA_SYSMMU_FIMD0 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[15] = {
+		.start	= IRQ_SYSMMU_LCD0_M0_0,
+		.end	= IRQ_SYSMMU_LCD0_M0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[16] = {
+		.start	= S5PV310_PA_SYSMMU_FIMD1,
+		.end	= S5PV310_PA_SYSMMU_FIMD1 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[17] = {
+		.start	= IRQ_SYSMMU_LCD1_M1_0,
+		.end	= IRQ_SYSMMU_LCD1_M1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[18] = {
+		.start	= S5PV310_PA_SYSMMU_PCIe,
+		.end	= S5PV310_PA_SYSMMU_PCIe + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[19] = {
+		.start	= IRQ_SYSMMU_PCIE_0,
+		.end	= IRQ_SYSMMU_PCIE_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[20] = {
+		.start	= S5PV310_PA_SYSMMU_G2D,
+		.end	= S5PV310_PA_SYSMMU_G2D + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[21] = {
+		.start	= IRQ_SYSMMU_2D_0,
+		.end	= IRQ_SYSMMU_2D_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[22] = {
+		.start	= S5PV310_PA_SYSMMU_ROTATOR,
+		.end	= S5PV310_PA_SYSMMU_ROTATOR + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[23] = {
+		.start	= IRQ_SYSMMU_ROTATOR_0,
+		.end	= IRQ_SYSMMU_ROTATOR_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[24] = {
+		.start	= S5PV310_PA_SYSMMU_MDMA2,
+		.end	= S5PV310_PA_SYSMMU_MDMA2 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[25] = {
+		.start	= IRQ_SYSMMU_MDMA1_0,
+		.end	= IRQ_SYSMMU_MDMA1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[26] = {
+		.start	= S5PV310_PA_SYSMMU_TV,
+		.end	= S5PV310_PA_SYSMMU_TV + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[27] = {
+		.start	= IRQ_SYSMMU_TV_M0_0,
+		.end	= IRQ_SYSMMU_TV_M0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[28] = {
+		.start	= S5PV310_PA_SYSMMU_MFC_L,
+		.end	= S5PV310_PA_SYSMMU_MFC_L + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[29] = {
+		.start	= IRQ_SYSMMU_MFC_M0_0,
+		.end	= IRQ_SYSMMU_MFC_M0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[30] = {
+		.start	= S5PV310_PA_SYSMMU_MFC_R,
+		.end	= S5PV310_PA_SYSMMU_MFC_R + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[31] = {
+		.start	= IRQ_SYSMMU_MFC_M1_0,
+		.end	= IRQ_SYSMMU_MFC_M1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+struct platform_device s5pv310_device_sysmmu = {
+	.name		= "s5p-sysmmu",
+	.id		= 32,
+	.num_resources	= ARRAY_SIZE(s5pv310_sysmmu_resource),
+	.resource	= s5pv310_sysmmu_resource,
+};
+
+EXPORT_SYMBOL(s5pv310_device_sysmmu);
diff --git a/arch/arm/mach-s5pv310/hotplug.c b/arch/arm/mach-s5pv310/hotplug.c
index afa5392..c24235c 100644
--- a/arch/arm/mach-s5pv310/hotplug.c
+++ b/arch/arm/mach-s5pv310/hotplug.c
@@ -30,10 +30,10 @@
 	 * Turn off coherency
 	 */
 	"	mrc	p15, 0, %0, c1, c0, 1\n"
-	"	bic	%0, %0, %2\n"
+	"	bic	%0, %0, #0x20\n"
 	"	mcr	p15, 0, %0, c1, c0, 1\n"
 	"	mrc	p15, 0, %0, c1, c0, 0\n"
-	"	bic	%0, %0, #0x04\n"
+	"	bic	%0, %0, %2\n"
 	"	mcr	p15, 0, %0, c1, c0, 0\n"
 	  : "=&r" (v)
 	  : "r" (0), "Ir" (CR_C)
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 3c05c58..536b0b5 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -25,6 +25,8 @@
 
 #define IRQ_SPI(x)		S5P_IRQ(x+32)
 
+#define IRQ_MCT1		IRQ_SPI(35)
+
 #define IRQ_EINT0		IRQ_SPI(40)
 #define IRQ_EINT1		IRQ_SPI(41)
 #define IRQ_EINT2		IRQ_SPI(42)
@@ -36,9 +38,8 @@
 #define IRQ_JPEG		IRQ_SPI(48)
 #define IRQ_2D			IRQ_SPI(49)
 #define IRQ_PCIE		IRQ_SPI(50)
-#define IRQ_SYSTEM_TIMER	IRQ_SPI(51)
+#define IRQ_MCT0		IRQ_SPI(51)
 #define IRQ_MFC			IRQ_SPI(52)
-#define IRQ_WDT			IRQ_SPI(53)
 #define IRQ_AUDIO_SS		IRQ_SPI(54)
 #define IRQ_AC97		IRQ_SPI(55)
 #define IRQ_SPDIF		IRQ_SPI(56)
@@ -54,6 +55,24 @@
 #define COMBINER_GROUP(x)	((x) * MAX_IRQ_IN_COMBINER + IRQ_SPI(64))
 #define COMBINER_IRQ(x, y)	(COMBINER_GROUP(x) + y)
 
+#define IRQ_SYSMMU_MDMA0_0	COMBINER_IRQ(4, 0)
+#define IRQ_SYSMMU_SSS_0	COMBINER_IRQ(4, 1)
+#define IRQ_SYSMMU_FIMC0_0	COMBINER_IRQ(4, 2)
+#define IRQ_SYSMMU_FIMC1_0	COMBINER_IRQ(4, 3)
+#define IRQ_SYSMMU_FIMC2_0	COMBINER_IRQ(4, 4)
+#define IRQ_SYSMMU_FIMC3_0	COMBINER_IRQ(4, 5)
+#define IRQ_SYSMMU_JPEG_0	COMBINER_IRQ(4, 6)
+#define IRQ_SYSMMU_2D_0		COMBINER_IRQ(4, 7)
+
+#define IRQ_SYSMMU_ROTATOR_0	COMBINER_IRQ(5, 0)
+#define IRQ_SYSMMU_MDMA1_0	COMBINER_IRQ(5, 1)
+#define IRQ_SYSMMU_LCD0_M0_0	COMBINER_IRQ(5, 2)
+#define IRQ_SYSMMU_LCD1_M1_0	COMBINER_IRQ(5, 3)
+#define IRQ_SYSMMU_TV_M0_0	COMBINER_IRQ(5, 4)
+#define IRQ_SYSMMU_MFC_M0_0	COMBINER_IRQ(5, 5)
+#define IRQ_SYSMMU_MFC_M1_0	COMBINER_IRQ(5, 6)
+#define IRQ_SYSMMU_PCIE_0	COMBINER_IRQ(5, 7)
+
 #define IRQ_PDMA0		COMBINER_IRQ(21, 0)
 #define IRQ_PDMA1		COMBINER_IRQ(21, 1)
 
@@ -86,8 +105,13 @@
 #define IRQ_HSMMC2		COMBINER_IRQ(29, 2)
 #define IRQ_HSMMC3		COMBINER_IRQ(29, 3)
 
+#define IRQ_MIPI_CSIS0		COMBINER_IRQ(30, 0)
+#define IRQ_MIPI_CSIS1		COMBINER_IRQ(30, 1)
+
 #define IRQ_ONENAND_AUDI	COMBINER_IRQ(34, 0)
 
+#define IRQ_MCT_L1		COMBINER_IRQ(35, 3)
+
 #define IRQ_EINT4		COMBINER_IRQ(37, 0)
 #define IRQ_EINT5		COMBINER_IRQ(37, 1)
 #define IRQ_EINT6		COMBINER_IRQ(37, 2)
@@ -104,7 +128,11 @@
 
 #define IRQ_EINT16_31		COMBINER_IRQ(39, 0)
 
-#define MAX_COMBINER_NR		40
+#define IRQ_MCT_L0		COMBINER_IRQ(51, 0)
+
+#define IRQ_WDT			COMBINER_IRQ(53, 0)
+
+#define MAX_COMBINER_NR		54
 
 #define S5P_IRQ_EINT_BASE	COMBINER_IRQ(MAX_COMBINER_NR, 0)
 
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 5399446..74d4006 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -39,11 +39,15 @@
 #define S5PV310_PA_SYSCON		(0x10010000)
 #define S5P_PA_SYSCON			S5PV310_PA_SYSCON
 
+#define S5PV310_PA_PMU			(0x10020000)
+
 #define S5PV310_PA_CMU			(0x10030000)
 
 #define S5PV310_PA_WATCHDOG		(0x10060000)
 #define S5PV310_PA_RTC			(0x10070000)
 
+#define S5PV310_PA_DMC0			(0x10400000)
+
 #define S5PV310_PA_COMBINER		(0x10448000)
 
 #define S5PV310_PA_COREPERI		(0x10500000)
@@ -61,9 +65,13 @@
 #define S5PV310_PA_GPIO2		(0x11000000)
 #define S5PV310_PA_GPIO3		(0x03860000)
 
+#define S5PV310_PA_MIPI_CSIS0		0x11880000
+#define S5PV310_PA_MIPI_CSIS1		0x11890000
+
 #define S5PV310_PA_HSMMC(x)		(0x12510000 + ((x) * 0x10000))
 
 #define S5PV310_PA_SROMC		(0x12570000)
+#define S5P_PA_SROMC			S5PV310_PA_SROMC
 
 /* S/PDIF */
 #define S5PV310_PA_SPDIF	0xE1100000
@@ -100,6 +108,25 @@
 #define S5PV310_PA_SDRAM		(0x40000000)
 #define S5P_PA_SDRAM			S5PV310_PA_SDRAM
 
+#define S5PV310_PA_SYSMMU_MDMA		0x10A40000
+#define S5PV310_PA_SYSMMU_SSS		0x10A50000
+#define S5PV310_PA_SYSMMU_FIMC0		0x11A20000
+#define S5PV310_PA_SYSMMU_FIMC1		0x11A30000
+#define S5PV310_PA_SYSMMU_FIMC2		0x11A40000
+#define S5PV310_PA_SYSMMU_FIMC3		0x11A50000
+#define S5PV310_PA_SYSMMU_JPEG		0x11A60000
+#define S5PV310_PA_SYSMMU_FIMD0		0x11E20000
+#define S5PV310_PA_SYSMMU_FIMD1		0x12220000
+#define S5PV310_PA_SYSMMU_PCIe		0x12620000
+#define S5PV310_PA_SYSMMU_G2D		0x12A20000
+#define S5PV310_PA_SYSMMU_ROTATOR	0x12A30000
+#define S5PV310_PA_SYSMMU_MDMA2		0x12A40000
+#define S5PV310_PA_SYSMMU_TV		0x12E20000
+#define S5PV310_PA_SYSMMU_MFC_L		0x13620000
+#define S5PV310_PA_SYSMMU_MFC_R		0x13630000
+#define S5PV310_SYSMMU_TOTAL_IPNUM	16
+#define S5P_SYSMMU_TOTAL_IPNUM		S5PV310_SYSMMU_TOTAL_IPNUM
+
 /* compatibility defines. */
 #define S3C_PA_UART			S5PV310_PA_UART
 #define S3C_PA_HSMMC0			S5PV310_PA_HSMMC(0)
@@ -116,5 +143,7 @@
 #define S3C_PA_IIC7			S5PV310_PA_IIC(7)
 #define S3C_PA_RTC			S5PV310_PA_RTC
 #define S3C_PA_WDT			S5PV310_PA_WATCHDOG
+#define S5P_PA_MIPI_CSIS0		S5PV310_PA_MIPI_CSIS0
+#define S5P_PA_MIPI_CSIS1		S5PV310_PA_MIPI_CSIS1
 
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-clock.h b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
index f1028ca..b5c4ada 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
@@ -19,6 +19,12 @@
 
 #define S5P_INFORM0			S5P_CLKREG(0x800)
 
+#define S5P_CLKDIV_LEFTBUS		S5P_CLKREG(0x04500)
+#define S5P_CLKDIV_STAT_LEFTBUS		S5P_CLKREG(0x04600)
+
+#define S5P_CLKDIV_RIGHTBUS		S5P_CLKREG(0x08500)
+#define S5P_CLKDIV_STAT_RIGHTBUS	S5P_CLKREG(0x08600)
+
 #define S5P_EPLL_CON0			S5P_CLKREG(0x0C110)
 #define S5P_EPLL_CON1			S5P_CLKREG(0x0C114)
 #define S5P_VPLL_CON0			S5P_CLKREG(0x0C120)
@@ -58,6 +64,8 @@
 #define S5P_CLKSRC_MASK_PERIL0		S5P_CLKREG(0x0C350)
 #define S5P_CLKSRC_MASK_PERIL1		S5P_CLKREG(0x0C354)
 
+#define S5P_CLKDIV_STAT_TOP		S5P_CLKREG(0x0C610)
+
 #define S5P_CLKGATE_IP_CAM		S5P_CLKREG(0x0C920)
 #define S5P_CLKGATE_IP_IMAGE		S5P_CLKREG(0x0C930)
 #define S5P_CLKGATE_IP_LCD0		S5P_CLKREG(0x0C934)
@@ -66,8 +74,9 @@
 #define S5P_CLKGATE_IP_PERIL		S5P_CLKREG(0x0C950)
 #define S5P_CLKGATE_IP_PERIR		S5P_CLKREG(0x0C960)
 
-#define S5P_CLKSRC_CORE			S5P_CLKREG(0x10200)
-#define S5P_CLKDIV_CORE0		S5P_CLKREG(0x10500)
+#define S5P_CLKSRC_DMC			S5P_CLKREG(0x10200)
+#define S5P_CLKDIV_DMC0			S5P_CLKREG(0x10500)
+#define S5P_CLKDIV_STAT_DMC0		S5P_CLKREG(0x10600)
 
 #define S5P_APLL_LOCK			S5P_CLKREG(0x14000)
 #define S5P_MPLL_LOCK			S5P_CLKREG(0x14004)
@@ -80,10 +89,77 @@
 #define S5P_CLKMUX_STATCPU		S5P_CLKREG(0x14400)
 
 #define S5P_CLKDIV_CPU			S5P_CLKREG(0x14500)
+#define S5P_CLKDIV_CPU1			S5P_CLKREG(0x14504)
 #define S5P_CLKDIV_STATCPU		S5P_CLKREG(0x14600)
+#define S5P_CLKDIV_STATCPU1		S5P_CLKREG(0x14604)
 
 #define S5P_CLKGATE_SCLKCPU		S5P_CLKREG(0x14800)
 
+/* APLL_LOCK */
+#define S5P_APLL_LOCKTIME		(0x1C20)	/* 300us */
+
+/* APLL_CON0 */
+#define S5P_APLLCON0_ENABLE_SHIFT	(31)
+#define S5P_APLLCON0_LOCKED_SHIFT	(29)
+#define S5P_APLL_VAL_1000		((250 << 16) | (6 << 8) | 1)
+#define S5P_APLL_VAL_800		((200 << 16) | (6 << 8) | 1)
+
+/* CLK_SRC_CPU */
+#define S5P_CLKSRC_CPU_MUXCORE_SHIFT	(16)
+#define S5P_CLKMUX_STATCPU_MUXCORE_MASK	(0x7 << S5P_CLKSRC_CPU_MUXCORE_SHIFT)
+
+/* CLKDIV_CPU0 */
+#define S5P_CLKDIV_CPU0_CORE_SHIFT	(0)
+#define S5P_CLKDIV_CPU0_CORE_MASK	(0x7 << S5P_CLKDIV_CPU0_CORE_SHIFT)
+#define S5P_CLKDIV_CPU0_COREM0_SHIFT	(4)
+#define S5P_CLKDIV_CPU0_COREM0_MASK	(0x7 << S5P_CLKDIV_CPU0_COREM0_SHIFT)
+#define S5P_CLKDIV_CPU0_COREM1_SHIFT	(8)
+#define S5P_CLKDIV_CPU0_COREM1_MASK	(0x7 << S5P_CLKDIV_CPU0_COREM1_SHIFT)
+#define S5P_CLKDIV_CPU0_PERIPH_SHIFT	(12)
+#define S5P_CLKDIV_CPU0_PERIPH_MASK	(0x7 << S5P_CLKDIV_CPU0_PERIPH_SHIFT)
+#define S5P_CLKDIV_CPU0_ATB_SHIFT	(16)
+#define S5P_CLKDIV_CPU0_ATB_MASK	(0x7 << S5P_CLKDIV_CPU0_ATB_SHIFT)
+#define S5P_CLKDIV_CPU0_PCLKDBG_SHIFT	(20)
+#define S5P_CLKDIV_CPU0_PCLKDBG_MASK	(0x7 << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT)
+#define S5P_CLKDIV_CPU0_APLL_SHIFT	(24)
+#define S5P_CLKDIV_CPU0_APLL_MASK	(0x7 << S5P_CLKDIV_CPU0_APLL_SHIFT)
+
+/* CLKDIV_DMC0 */
+#define S5P_CLKDIV_DMC0_ACP_SHIFT	(0)
+#define S5P_CLKDIV_DMC0_ACP_MASK	(0x7 << S5P_CLKDIV_DMC0_ACP_SHIFT)
+#define S5P_CLKDIV_DMC0_ACPPCLK_SHIFT	(4)
+#define S5P_CLKDIV_DMC0_ACPPCLK_MASK	(0x7 << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT)
+#define S5P_CLKDIV_DMC0_DPHY_SHIFT	(8)
+#define S5P_CLKDIV_DMC0_DPHY_MASK	(0x7 << S5P_CLKDIV_DMC0_DPHY_SHIFT)
+#define S5P_CLKDIV_DMC0_DMC_SHIFT	(12)
+#define S5P_CLKDIV_DMC0_DMC_MASK	(0x7 << S5P_CLKDIV_DMC0_DMC_SHIFT)
+#define S5P_CLKDIV_DMC0_DMCD_SHIFT	(16)
+#define S5P_CLKDIV_DMC0_DMCD_MASK	(0x7 << S5P_CLKDIV_DMC0_DMCD_SHIFT)
+#define S5P_CLKDIV_DMC0_DMCP_SHIFT	(20)
+#define S5P_CLKDIV_DMC0_DMCP_MASK	(0x7 << S5P_CLKDIV_DMC0_DMCP_SHIFT)
+#define S5P_CLKDIV_DMC0_COPY2_SHIFT	(24)
+#define S5P_CLKDIV_DMC0_COPY2_MASK	(0x7 << S5P_CLKDIV_DMC0_COPY2_SHIFT)
+#define S5P_CLKDIV_DMC0_CORETI_SHIFT	(28)
+#define S5P_CLKDIV_DMC0_CORETI_MASK	(0x7 << S5P_CLKDIV_DMC0_CORETI_SHIFT)
+
+/* CLKDIV_TOP */
+#define S5P_CLKDIV_TOP_ACLK200_SHIFT	(0)
+#define S5P_CLKDIV_TOP_ACLK200_MASK	(0x7 << S5P_CLKDIV_TOP_ACLK200_SHIFT)
+#define S5P_CLKDIV_TOP_ACLK100_SHIFT	(4)
+#define S5P_CLKDIV_TOP_ACLK100_MASK	(0xf << S5P_CLKDIV_TOP_ACLK100_SHIFT)
+#define S5P_CLKDIV_TOP_ACLK160_SHIFT	(8)
+#define S5P_CLKDIV_TOP_ACLK160_MASK	(0x7 << S5P_CLKDIV_TOP_ACLK160_SHIFT)
+#define S5P_CLKDIV_TOP_ACLK133_SHIFT	(12)
+#define S5P_CLKDIV_TOP_ACLK133_MASK	(0x7 << S5P_CLKDIV_TOP_ACLK133_SHIFT)
+#define S5P_CLKDIV_TOP_ONENAND_SHIFT	(16)
+#define S5P_CLKDIV_TOP_ONENAND_MASK	(0x7 << S5P_CLKDIV_TOP_ONENAND_SHIFT)
+
+/* CLKDIV_LEFTBUS / CLKDIV_RIGHTBUS */
+#define S5P_CLKDIV_BUS_GDLR_SHIFT	(0)
+#define S5P_CLKDIV_BUS_GDLR_MASK	(0x7 << S5P_CLKDIV_BUS_GDLR_SHIFT)
+#define S5P_CLKDIV_BUS_GPLR_SHIFT	(4)
+#define S5P_CLKDIV_BUS_GPLR_MASK	(0x7 << S5P_CLKDIV_BUS_GPLR_SHIFT)
+
 /* Compatibility defines */
 
 #define S5P_EPLL_CON			S5P_EPLL_CON0
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-mem.h b/arch/arm/mach-s5pv310/include/mach/regs-mem.h
new file mode 100644
index 0000000..8342271
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-mem.h
@@ -0,0 +1,23 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-mem.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - SROMC and DMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_MEM_H
+#define __ASM_ARCH_REGS_MEM_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_DMC0_MEMCON_OFFSET		0x04
+
+#define S5P_DMC0_MEMTYPE_SHIFT		8
+#define S5P_DMC0_MEMTYPE_MASK		0xF
+
+#endif /* __ASM_ARCH_REGS_MEM_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-pmu.h b/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
new file mode 100644
index 0000000..fb333d0
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
@@ -0,0 +1,30 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - Power management unit definition
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_PMU_H
+#define __ASM_ARCH_REGS_PMU_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_PMUREG(x)			(S5P_VA_PMU + (x))
+
+#define S5P_PMU_CAM_CONF		S5P_PMUREG(0x3C00)
+#define S5P_PMU_TV_CONF		S5P_PMUREG(0x3C20)
+#define S5P_PMU_MFC_CONF		S5P_PMUREG(0x3C40)
+#define S5P_PMU_G3D_CONF		S5P_PMUREG(0x3C60)
+#define S5P_PMU_LCD0_CONF		S5P_PMUREG(0x3C80)
+#define S5P_PMU_LCD1_CONF		S5P_PMUREG(0x3CA0)
+#define S5P_PMU_GPS_CONF		S5P_PMUREG(0x3CE0)
+
+#define S5P_INT_LOCAL_PWR_EN		0x7
+
+#endif /* __ASM_ARCH_REGS_PMU_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-srom.h b/arch/arm/mach-s5pv310/include/mach/regs-srom.h
deleted file mode 100644
index 1898b3e..0000000
--- a/arch/arm/mach-s5pv310/include/mach/regs-srom.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-srom.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5PV310 - SROMC register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SROM_H
-#define __ASM_ARCH_REGS_SROM_H __FILE__
-
-#include <mach/map.h>
-
-#define S5PV310_SROMREG(x)	(S5P_VA_SROMC + (x))
-
-#define S5PV310_SROM_BW		S5PV310_SROMREG(0x0)
-#define S5PV310_SROM_BC0	S5PV310_SROMREG(0x4)
-#define S5PV310_SROM_BC1	S5PV310_SROMREG(0x8)
-#define S5PV310_SROM_BC2	S5PV310_SROMREG(0xc)
-#define S5PV310_SROM_BC3	S5PV310_SROMREG(0x10)
-
-/* one register BW holds 4 x 4-bit packed settings for NCS0 - NCS3 */
-
-#define S5PV310_SROM_BW__DATAWIDTH__SHIFT	0
-#define S5PV310_SROM_BW__ADDRMODE__SHIFT	1
-#define S5PV310_SROM_BW__WAITENABLE__SHIFT	2
-#define S5PV310_SROM_BW__BYTEENABLE__SHIFT	3
-
-#define S5PV310_SROM_BW__CS_MASK		0xf
-
-#define S5PV310_SROM_BW__NCS0__SHIFT		0
-#define S5PV310_SROM_BW__NCS1__SHIFT		4
-#define S5PV310_SROM_BW__NCS2__SHIFT		8
-#define S5PV310_SROM_BW__NCS3__SHIFT		12
-
-/* applies to same to BCS0 - BCS3 */
-
-#define S5PV310_SROM_BCX__PMC__SHIFT		0
-#define S5PV310_SROM_BCX__TACP__SHIFT		4
-#define S5PV310_SROM_BCX__TCAH__SHIFT		8
-#define S5PV310_SROM_BCX__TCOH__SHIFT		12
-#define S5PV310_SROM_BCX__TACC__SHIFT		16
-#define S5PV310_SROM_BCX__TCOS__SHIFT		24
-#define S5PV310_SROM_BCX__TACS__SHIFT		28
-
-#endif /* __ASM_ARCH_REGS_SROM_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h b/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
new file mode 100644
index 0000000..0b28e81
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
@@ -0,0 +1,24 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - System MMU register
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_SYSMMU_H
+#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
+
+#define S5P_MMU_CTRL			0x000
+#define S5P_MMU_CFG			0x004
+#define S5P_MMU_STATUS			0x008
+#define S5P_MMU_FLUSH			0x00C
+#define S5P_PT_BASE_ADDR		0x014
+#define S5P_INT_STATUS			0x018
+#define S5P_PAGE_FAULT_ADDR		0x024
+
+#endif /* __ASM_ARCH_REGS_SYSMMU_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/sysmmu.h b/arch/arm/mach-s5pv310/include/mach/sysmmu.h
new file mode 100644
index 0000000..662fe85f
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/sysmmu.h
@@ -0,0 +1,119 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/sysmmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Samsung sysmmu driver for S5PV310
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARM_ARCH_SYSMMU_H
+#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
+
+enum s5pv310_sysmmu_ips {
+	SYSMMU_MDMA,
+	SYSMMU_SSS,
+	SYSMMU_FIMC0,
+	SYSMMU_FIMC1,
+	SYSMMU_FIMC2,
+	SYSMMU_FIMC3,
+	SYSMMU_JPEG,
+	SYSMMU_FIMD0,
+	SYSMMU_FIMD1,
+	SYSMMU_PCIe,
+	SYSMMU_G2D,
+	SYSMMU_ROTATOR,
+	SYSMMU_MDMA2,
+	SYSMMU_TV,
+	SYSMMU_MFC_L,
+	SYSMMU_MFC_R,
+};
+
+static char *sysmmu_ips_name[S5P_SYSMMU_TOTAL_IPNUM] = {
+	"SYSMMU_MDMA"	,
+	"SYSMMU_SSS"	,
+	"SYSMMU_FIMC0"	,
+	"SYSMMU_FIMC1"	,
+	"SYSMMU_FIMC2"	,
+	"SYSMMU_FIMC3"	,
+	"SYSMMU_JPEG"	,
+	"SYSMMU_FIMD0"	,
+	"SYSMMU_FIMD1"	,
+	"SYSMMU_PCIe"	,
+	"SYSMMU_G2D"	,
+	"SYSMMU_ROTATOR",
+	"SYSMMU_MDMA2"	,
+	"SYSMMU_TV"	,
+	"SYSMMU_MFC_L"	,
+	"SYSMMU_MFC_R"	,
+};
+
+typedef enum s5pv310_sysmmu_ips sysmmu_ips;
+
+struct sysmmu_tt_info {
+	unsigned long *pgd;
+	unsigned long pgd_paddr;
+	unsigned long *pte;
+};
+
+struct sysmmu_controller {
+	const char		*name;
+
+	/* channel registers */
+	void __iomem		*regs;
+
+	/* channel irq */
+	unsigned int		irq;
+
+	sysmmu_ips		ips;
+
+	/* Translation Table Info. */
+	struct sysmmu_tt_info	*tt_info;
+
+	struct resource		*mem;
+	struct device		*dev;
+
+	/* true when the SysMMU controller is enabled */
+	bool			enable;
+};
+
+/**
+ * s5p_sysmmu_enable() - enable the System MMU of an IP block
+ * @ips: The IP block connected to the System MMU.
+ *
+ * This function enables the System MMU so that it translates virtual
+ * addresses to physical addresses for the given IP block.
+ */
+int s5p_sysmmu_enable(sysmmu_ips ips);
+
+/**
+ * s5p_sysmmu_disable() - disable the System MMU of an IP block
+ * @ips: The IP block connected to the System MMU.
+ *
+ * This function disables the System MMU, stopping virtual-to-physical
+ * address translation for the given IP block.
+ */
+int s5p_sysmmu_disable(sysmmu_ips ips);
+
+/**
+ * s5p_sysmmu_set_tablebase_pgd() - set the page table base address
+ * @ips: The IP block connected to the System MMU.
+ * @pgd: The page table base address.
+ *
+ * This function sets the page table base address. When the System MMU
+ * translates a virtual address to a physical one, it looks the mapping
+ * up in this page table.
+ */
+int s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
+
+/**
+ * s5p_sysmmu_tlb_invalidate() - flush all TLB entries in the System MMU
+ * @ips: The IP block connected to the System MMU.
+ *
+ * This function flushes all TLB entries in the System MMU.
+ */
+int s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
+#endif /* __ASM_ARM_ARCH_SYSMMU_H */
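A hypothetical consumer of this API (the function and the choice of IP
block are illustrative, not part of the patch): enable a channel, point
it at a page table, then flush stale translations:

	static int example_attach_sysmmu(unsigned long pgd_paddr)
	{
		int ret;

		ret = s5p_sysmmu_enable(SYSMMU_MFC_L);
		if (ret)
			return ret;

		ret = s5p_sysmmu_set_tablebase_pgd(SYSMMU_MFC_L, pgd_paddr);
		if (ret)
			return ret;

		/* Drop any stale translations after switching page tables. */
		return s5p_sysmmu_tlb_invalidate(SYSMMU_MFC_L);
	}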
diff --git a/arch/arm/mach-s5pv310/irq-combiner.c b/arch/arm/mach-s5pv310/irq-combiner.c
index c3f88c3..1ea4a9e 100644
--- a/arch/arm/mach-s5pv310/irq-combiner.c
+++ b/arch/arm/mach-s5pv310/irq-combiner.c
@@ -24,29 +24,32 @@
 
 struct combiner_chip_data {
 	unsigned int irq_offset;
+	unsigned int irq_mask;
 	void __iomem *base;
 };
 
 static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
 
-static inline void __iomem *combiner_base(unsigned int irq)
+static inline void __iomem *combiner_base(struct irq_data *data)
 {
-	struct combiner_chip_data *combiner_data = get_irq_chip_data(irq);
+	struct combiner_chip_data *combiner_data =
+		irq_data_get_irq_chip_data(data);
+
 	return combiner_data->base;
 }
 
-static void combiner_mask_irq(unsigned int irq)
+static void combiner_mask_irq(struct irq_data *data)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (data->irq % 32);
 
-	__raw_writel(mask, combiner_base(irq) + COMBINER_ENABLE_CLEAR);
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
 }
 
-static void combiner_unmask_irq(unsigned int irq)
+static void combiner_unmask_irq(struct irq_data *data)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (data->irq % 32);
 
-	__raw_writel(mask, combiner_base(irq) + COMBINER_ENABLE_SET);
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
 }
 
 static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
@@ -57,11 +60,12 @@
 	unsigned long status;
 
 	/* primary controller ack'ing */
-	chip->ack(irq);
+	chip->irq_ack(&desc->irq_data);
 
 	spin_lock(&irq_controller_lock);
 	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
 	spin_unlock(&irq_controller_lock);
+	status &= chip_data->irq_mask;
 
 	if (status == 0)
 		goto out;
@@ -76,13 +80,13 @@
 
  out:
 	/* primary controller unmasking */
-	chip->unmask(irq);
+	chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip combiner_chip = {
 	.name		= "COMBINER",
-	.mask		= combiner_mask_irq,
-	.unmask		= combiner_unmask_irq,
+	.irq_mask	= combiner_mask_irq,
+	.irq_unmask	= combiner_unmask_irq,
 };
 
 void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
@@ -104,10 +108,12 @@
 
 	combiner_data[combiner_nr].base = base;
 	combiner_data[combiner_nr].irq_offset = irq_start;
+	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
 
 	/* Disable all interrupts */
 
-	__raw_writel(0xffffffff, base + COMBINER_ENABLE_CLEAR);
+	__raw_writel(combiner_data[combiner_nr].irq_mask,
+		     base + COMBINER_ENABLE_CLEAR);
 
 	/* Setup the Linux IRQ subsystem */
 
diff --git a/arch/arm/mach-s5pv310/irq-eint.c b/arch/arm/mach-s5pv310/irq-eint.c
index 5877503..477bd9e 100644
--- a/arch/arm/mach-s5pv310/irq-eint.c
+++ b/arch/arm/mach-s5pv310/irq-eint.c
@@ -48,42 +48,43 @@
 	return ret;
 }
 
-static inline void s5pv310_irq_eint_mask(unsigned int irq)
+static inline void s5pv310_irq_eint_mask(struct irq_data *data)
 {
 	u32 mask;
 
 	spin_lock(&eint_lock);
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask |= eint_irq_to_bit(irq);
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask |= eint_irq_to_bit(data->irq);
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 	spin_unlock(&eint_lock);
 }
 
-static void s5pv310_irq_eint_unmask(unsigned int irq)
+static void s5pv310_irq_eint_unmask(struct irq_data *data)
 {
 	u32 mask;
 
 	spin_lock(&eint_lock);
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask &= ~(eint_irq_to_bit(irq));
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask &= ~(eint_irq_to_bit(data->irq));
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 	spin_unlock(&eint_lock);
 }
 
-static inline void s5pv310_irq_eint_ack(unsigned int irq)
+static inline void s5pv310_irq_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+	__raw_writel(eint_irq_to_bit(data->irq),
+		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
 }
 
-static void s5pv310_irq_eint_maskack(unsigned int irq)
+static void s5pv310_irq_eint_maskack(struct irq_data *data)
 {
-	s5pv310_irq_eint_mask(irq);
-	s5pv310_irq_eint_ack(irq);
+	s5pv310_irq_eint_mask(data);
+	s5pv310_irq_eint_ack(data);
 }
 
-static int s5pv310_irq_eint_set_type(unsigned int irq, unsigned int type)
+static int s5pv310_irq_eint_set_type(struct irq_data *data, unsigned int type)
 {
-	int offs = EINT_OFFSET(irq);
+	int offs = EINT_OFFSET(data->irq);
 	int shift;
 	u32 ctrl, mask;
 	u32 newvalue = 0;
@@ -118,10 +119,10 @@
 	mask = 0x7 << shift;
 
 	spin_lock(&eint_lock);
-	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(irq)));
+	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
 	ctrl &= ~mask;
 	ctrl |= newvalue << shift;
-	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(irq)));
+	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
 	spin_unlock(&eint_lock);
 
 	switch (offs) {
@@ -146,13 +147,13 @@
 
 static struct irq_chip s5pv310_irq_eint = {
 	.name		= "s5pv310-eint",
-	.mask		= s5pv310_irq_eint_mask,
-	.unmask		= s5pv310_irq_eint_unmask,
-	.mask_ack	= s5pv310_irq_eint_maskack,
-	.ack		= s5pv310_irq_eint_ack,
-	.set_type	= s5pv310_irq_eint_set_type,
+	.irq_mask	= s5pv310_irq_eint_mask,
+	.irq_unmask	= s5pv310_irq_eint_unmask,
+	.irq_mask_ack	= s5pv310_irq_eint_maskack,
+	.irq_ack	= s5pv310_irq_eint_ack,
+	.irq_set_type	= s5pv310_irq_eint_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= s3c_irqext_wake,
+	.irq_set_wake	= s3c_irqext_wake,
 #endif
 };
 
@@ -192,14 +193,14 @@
 	u32 *irq_data = get_irq_data(irq);
 	struct irq_chip *chip = get_irq_chip(irq);
 
-	chip->mask(irq);
+	chip->irq_mask(&desc->irq_data);
 
-	if (chip->ack)
-		chip->ack(irq);
+	if (chip->irq_ack)
+		chip->irq_ack(&desc->irq_data);
 
 	generic_handle_irq(*irq_data);
 
-	chip->unmask(irq);
+	chip->irq_unmask(&desc->irq_data);
 }
 
 int __init s5pv310_init_irq_eint(void)
diff --git a/arch/arm/mach-s5pv310/mach-smdkc210.c b/arch/arm/mach-s5pv310/mach-smdkc210.c
index 2b8d4fc..2d49273 100644
--- a/arch/arm/mach-s5pv310/mach-smdkc210.c
+++ b/arch/arm/mach-s5pv310/mach-smdkc210.c
@@ -14,18 +14,21 @@
 #include <linux/platform_device.h>
 #include <linux/smsc911x.h>
 #include <linux/io.h>
+#include <linux/i2c.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
 #include <plat/regs-serial.h>
+#include <plat/regs-srom.h>
 #include <plat/s5pv310.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/sdhci.h>
+#include <plat/iic.h>
+#include <plat/pd.h>
 
 #include <mach/map.h>
-#include <mach/regs-srom.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKC210_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -139,14 +142,29 @@
 	},
 };
 
+static struct i2c_board_info i2c_devs1[] __initdata = {
+	{I2C_BOARD_INFO("wm8994", 0x1a),},
+};
+
 static struct platform_device *smdkc210_devices[] __initdata = {
 	&s3c_device_hsmmc0,
 	&s3c_device_hsmmc1,
 	&s3c_device_hsmmc2,
 	&s3c_device_hsmmc3,
+	&s3c_device_i2c1,
 	&s3c_device_rtc,
 	&s3c_device_wdt,
+	&s5pv310_device_ac97,
+	&s5pv310_device_i2s0,
+	&s5pv310_device_pd[PD_MFC],
+	&s5pv310_device_pd[PD_G3D],
+	&s5pv310_device_pd[PD_LCD0],
+	&s5pv310_device_pd[PD_LCD1],
+	&s5pv310_device_pd[PD_CAM],
+	&s5pv310_device_pd[PD_TV],
+	&s5pv310_device_pd[PD_GPS],
 	&smdkc210_smsc911x,
+	&s5pv310_device_sysmmu,
 };
 
 static void __init smdkc210_smsc911x_init(void)
@@ -154,23 +172,22 @@
 	u32 cs1;
 
 	/* configure nCS1 width to 16 bits */
-	cs1 = __raw_readl(S5PV310_SROM_BW) &
-		    ~(S5PV310_SROM_BW__CS_MASK <<
-				    S5PV310_SROM_BW__NCS1__SHIFT);
-	cs1 |= ((1 << S5PV310_SROM_BW__DATAWIDTH__SHIFT) |
-		(1 << S5PV310_SROM_BW__WAITENABLE__SHIFT) |
-		(1 << S5PV310_SROM_BW__BYTEENABLE__SHIFT)) <<
-		S5PV310_SROM_BW__NCS1__SHIFT;
-	__raw_writel(cs1, S5PV310_SROM_BW);
+	cs1 = __raw_readl(S5P_SROM_BW) &
+		~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
+	cs1 |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
+		(1 << S5P_SROM_BW__WAITENABLE__SHIFT) |
+		(1 << S5P_SROM_BW__BYTEENABLE__SHIFT)) <<
+		S5P_SROM_BW__NCS1__SHIFT;
+	__raw_writel(cs1, S5P_SROM_BW);
 
 	/* set timing for nCS1 suitable for ethernet chip */
-	__raw_writel((0x1 << S5PV310_SROM_BCX__PMC__SHIFT) |
-		     (0x9 << S5PV310_SROM_BCX__TACP__SHIFT) |
-		     (0xc << S5PV310_SROM_BCX__TCAH__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOH__SHIFT) |
-		     (0x6 << S5PV310_SROM_BCX__TACC__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOS__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TACS__SHIFT), S5PV310_SROM_BC1);
+	__raw_writel((0x1 << S5P_SROM_BCX__PMC__SHIFT) |
+		     (0x9 << S5P_SROM_BCX__TACP__SHIFT) |
+		     (0xc << S5P_SROM_BCX__TCAH__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOH__SHIFT) |
+		     (0x6 << S5P_SROM_BCX__TACC__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOS__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
 }
 
 static void __init smdkc210_map_io(void)
@@ -182,6 +199,9 @@
 
 static void __init smdkc210_machine_init(void)
 {
+	s3c_i2c1_set_platdata(NULL);
+	i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
+
 	smdkc210_smsc911x_init();
 
 	s3c_sdhci0_set_platdata(&smdkc210_hsmmc0_pdata);
diff --git a/arch/arm/mach-s5pv310/mach-smdkv310.c b/arch/arm/mach-s5pv310/mach-smdkv310.c
index 35826d6..28680cf 100644
--- a/arch/arm/mach-s5pv310/mach-smdkv310.c
+++ b/arch/arm/mach-s5pv310/mach-smdkv310.c
@@ -14,18 +14,21 @@
 #include <linux/platform_device.h>
 #include <linux/smsc911x.h>
 #include <linux/io.h>
+#include <linux/i2c.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
 #include <plat/regs-serial.h>
+#include <plat/regs-srom.h>
 #include <plat/s5pv310.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/sdhci.h>
+#include <plat/iic.h>
+#include <plat/pd.h>
 
 #include <mach/map.h>
-#include <mach/regs-srom.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKV310_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -139,14 +142,29 @@
 	},
 };
 
+static struct i2c_board_info i2c_devs1[] __initdata = {
+	{I2C_BOARD_INFO("wm8994", 0x1a),},
+};
+
 static struct platform_device *smdkv310_devices[] __initdata = {
 	&s3c_device_hsmmc0,
 	&s3c_device_hsmmc1,
 	&s3c_device_hsmmc2,
 	&s3c_device_hsmmc3,
+	&s3c_device_i2c1,
 	&s3c_device_rtc,
 	&s3c_device_wdt,
+	&s5pv310_device_ac97,
+	&s5pv310_device_i2s0,
+	&s5pv310_device_pd[PD_MFC],
+	&s5pv310_device_pd[PD_G3D],
+	&s5pv310_device_pd[PD_LCD0],
+	&s5pv310_device_pd[PD_LCD1],
+	&s5pv310_device_pd[PD_CAM],
+	&s5pv310_device_pd[PD_TV],
+	&s5pv310_device_pd[PD_GPS],
 	&smdkv310_smsc911x,
+	&s5pv310_device_sysmmu,
 };
 
 static void __init smdkv310_smsc911x_init(void)
@@ -154,23 +172,22 @@
 	u32 cs1;
 
 	/* configure nCS1 width to 16 bits */
-	cs1 = __raw_readl(S5PV310_SROM_BW) &
-		    ~(S5PV310_SROM_BW__CS_MASK <<
-				    S5PV310_SROM_BW__NCS1__SHIFT);
-	cs1 |= ((1 << S5PV310_SROM_BW__DATAWIDTH__SHIFT) |
-		(1 << S5PV310_SROM_BW__WAITENABLE__SHIFT) |
-		(1 << S5PV310_SROM_BW__BYTEENABLE__SHIFT)) <<
-		S5PV310_SROM_BW__NCS1__SHIFT;
-	__raw_writel(cs1, S5PV310_SROM_BW);
+	cs1 = __raw_readl(S5P_SROM_BW) &
+		~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
+	cs1 |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
+		(1 << S5P_SROM_BW__WAITENABLE__SHIFT) |
+		(1 << S5P_SROM_BW__BYTEENABLE__SHIFT)) <<
+		S5P_SROM_BW__NCS1__SHIFT;
+	__raw_writel(cs1, S5P_SROM_BW);
 
 	/* set timing for nCS1 suitable for ethernet chip */
-	__raw_writel((0x1 << S5PV310_SROM_BCX__PMC__SHIFT) |
-		     (0x9 << S5PV310_SROM_BCX__TACP__SHIFT) |
-		     (0xc << S5PV310_SROM_BCX__TCAH__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOH__SHIFT) |
-		     (0x6 << S5PV310_SROM_BCX__TACC__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOS__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TACS__SHIFT), S5PV310_SROM_BC1);
+	__raw_writel((0x1 << S5P_SROM_BCX__PMC__SHIFT) |
+		     (0x9 << S5P_SROM_BCX__TACP__SHIFT) |
+		     (0xc << S5P_SROM_BCX__TCAH__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOH__SHIFT) |
+		     (0x6 << S5P_SROM_BCX__TACC__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOS__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
 }
 
 static void __init smdkv310_map_io(void)
@@ -182,6 +199,9 @@
 
 static void __init smdkv310_machine_init(void)
 {
+	s3c_i2c1_set_platdata(NULL);
+	i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
+
 	smdkv310_smsc911x_init();
 
 	s3c_sdhci0_set_platdata(&smdkv310_hsmmc0_pdata);
diff --git a/arch/arm/mach-s5pv310/mach-universal_c210.c b/arch/arm/mach-s5pv310/mach-universal_c210.c
index 16d8fc0..36bc3cf 100644
--- a/arch/arm/mach-s5pv310/mach-universal_c210.c
+++ b/arch/arm/mach-s5pv310/mach-universal_c210.c
@@ -13,6 +13,9 @@
 #include <linux/i2c.h>
 #include <linux/gpio_keys.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/mmc/host.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
@@ -21,6 +24,7 @@
 #include <plat/s5pv310.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
+#include <plat/sdhci.h>
 
 #include <mach/map.h>
 
@@ -116,6 +120,73 @@
 	},
 };
 
+/* eMMC */
+static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = {
+	.max_width		= 8,
+	.host_caps		= (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
+				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+				MMC_CAP_DISABLE),
+	.cd_type		= S3C_SDHCI_CD_PERMANENT,
+	.clk_type		= S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+static struct regulator_consumer_supply mmc0_supplies[] = {
+	REGULATOR_SUPPLY("vmmc", "s3c-sdhci.0"),
+};
+
+static struct regulator_init_data mmc0_fixed_voltage_init_data = {
+	.constraints		= {
+		.name		= "VMEM_VDD_2.8V",
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(mmc0_supplies),
+	.consumer_supplies	= mmc0_supplies,
+};
+
+static struct fixed_voltage_config mmc0_fixed_voltage_config = {
+	.supply_name		= "MASSMEMORY_EN",
+	.microvolts		= 2800000,
+	.gpio			= S5PV310_GPE1(3),
+	.enable_high		= true,
+	.init_data		= &mmc0_fixed_voltage_init_data,
+};
+
+static struct platform_device mmc0_fixed_voltage = {
+	.name			= "reg-fixed-voltage",
+	.id			= 0,
+	.dev			= {
+		.platform_data	= &mmc0_fixed_voltage_config,
+	},
+};
+
+/* SD */
+static struct s3c_sdhci_platdata universal_hsmmc2_data __initdata = {
+	.max_width		= 4,
+	.host_caps		= MMC_CAP_4_BIT_DATA |
+				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+				MMC_CAP_DISABLE,
+	.ext_cd_gpio		= S5PV310_GPX3(4),      /* XEINT_28 */
+	.ext_cd_gpio_invert	= 1,
+	.cd_type		= S3C_SDHCI_CD_GPIO,
+	.clk_type		= S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+/* WiFi */
+static struct s3c_sdhci_platdata universal_hsmmc3_data __initdata = {
+	.max_width		= 4,
+	.host_caps		= MMC_CAP_4_BIT_DATA |
+				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+				MMC_CAP_DISABLE,
+	.cd_type		= S3C_SDHCI_CD_EXTERNAL,
+};
+
+static void __init universal_sdhci_init(void)
+{
+	s3c_sdhci0_set_platdata(&universal_hsmmc0_data);
+	s3c_sdhci2_set_platdata(&universal_hsmmc2_data);
+	s3c_sdhci3_set_platdata(&universal_hsmmc3_data);
+}
+
 /* I2C0 */
 static struct i2c_board_info i2c0_devs[] __initdata = {
 	/* Camera, To be updated */
@@ -127,6 +198,13 @@
 };
 
 static struct platform_device *universal_devices[] __initdata = {
+	/* Samsung Platform Devices */
+	&mmc0_fixed_voltage,
+	&s3c_device_hsmmc0,
+	&s3c_device_hsmmc2,
+	&s3c_device_hsmmc3,
+
+	/* Universal Devices */
 	&universal_gpio_keys,
 	&s5p_device_onenand,
 };
@@ -140,6 +218,8 @@
 
 static void __init universal_machine_init(void)
 {
+	universal_sdhci_init();
+
 	i2c_register_board_info(0, i2c0_devs, ARRAY_SIZE(i2c0_devs));
 	i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
 
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 59d14f0..e21f347 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -21,7 +21,6 @@
 #include <asm/div64.h>
 #include <mach/hardware.h>
 #include <asm/system.h>
-#include <asm/pgtable.h>
 #include <asm/mach/map.h>
 #include <asm/mach/flash.h>
 #include <asm/irq.h>
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c
index 3093d46..3d85dfa 100644
--- a/arch/arm/mach-sa1100/irq.c
+++ b/arch/arm/mach-sa1100/irq.c
@@ -37,14 +37,14 @@
 #define GPIO_11_27_IRQ(i)	((i) - 21)
 #define GPIO11_27_MASK(irq)	(1 << GPIO_11_27_IRQ(irq))
 
-static int sa1100_gpio_type(unsigned int irq, unsigned int type)
+static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
 {
 	unsigned int mask;
 
-	if (irq <= 10)
-		mask = 1 << irq;
+	if (d->irq <= 10)
+		mask = 1 << d->irq;
 	else
-		mask = GPIO11_27_MASK(irq);
+		mask = GPIO11_27_MASK(d->irq);
 
 	if (type == IRQ_TYPE_PROBE) {
 		if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
@@ -70,37 +70,37 @@
 /*
  * GPIO IRQs must be acknowledged.  This is for IRQs from 0 to 10.
  */
-static void sa1100_low_gpio_ack(unsigned int irq)
+static void sa1100_low_gpio_ack(struct irq_data *d)
 {
-	GEDR = (1 << irq);
+	GEDR = (1 << d->irq);
 }
 
-static void sa1100_low_gpio_mask(unsigned int irq)
+static void sa1100_low_gpio_mask(struct irq_data *d)
 {
-	ICMR &= ~(1 << irq);
+	ICMR &= ~(1 << d->irq);
 }
 
-static void sa1100_low_gpio_unmask(unsigned int irq)
+static void sa1100_low_gpio_unmask(struct irq_data *d)
 {
-	ICMR |= 1 << irq;
+	ICMR |= 1 << d->irq;
 }
 
-static int sa1100_low_gpio_wake(unsigned int irq, unsigned int on)
+static int sa1100_low_gpio_wake(struct irq_data *d, unsigned int on)
 {
 	if (on)
-		PWER |= 1 << irq;
+		PWER |= 1 << d->irq;
 	else
-		PWER &= ~(1 << irq);
+		PWER &= ~(1 << d->irq);
 	return 0;
 }
 
 static struct irq_chip sa1100_low_gpio_chip = {
 	.name		= "GPIO-l",
-	.ack		= sa1100_low_gpio_ack,
-	.mask		= sa1100_low_gpio_mask,
-	.unmask		= sa1100_low_gpio_unmask,
-	.set_type	= sa1100_gpio_type,
-	.set_wake	= sa1100_low_gpio_wake,
+	.irq_ack	= sa1100_low_gpio_ack,
+	.irq_mask	= sa1100_low_gpio_mask,
+	.irq_unmask	= sa1100_low_gpio_unmask,
+	.irq_set_type	= sa1100_gpio_type,
+	.irq_set_wake	= sa1100_low_gpio_wake,
 };
 
 /*
@@ -139,16 +139,16 @@
  * In addition, the IRQs are all collected up into one bit in the
  * interrupt controller registers.
  */
-static void sa1100_high_gpio_ack(unsigned int irq)
+static void sa1100_high_gpio_ack(struct irq_data *d)
 {
-	unsigned int mask = GPIO11_27_MASK(irq);
+	unsigned int mask = GPIO11_27_MASK(d->irq);
 
 	GEDR = mask;
 }
 
-static void sa1100_high_gpio_mask(unsigned int irq)
+static void sa1100_high_gpio_mask(struct irq_data *d)
 {
-	unsigned int mask = GPIO11_27_MASK(irq);
+	unsigned int mask = GPIO11_27_MASK(d->irq);
 
 	GPIO_IRQ_mask &= ~mask;
 
@@ -156,9 +156,9 @@
 	GFER &= ~mask;
 }
 
-static void sa1100_high_gpio_unmask(unsigned int irq)
+static void sa1100_high_gpio_unmask(struct irq_data *d)
 {
-	unsigned int mask = GPIO11_27_MASK(irq);
+	unsigned int mask = GPIO11_27_MASK(d->irq);
 
 	GPIO_IRQ_mask |= mask;
 
@@ -166,44 +166,44 @@
 	GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
 }
 
-static int sa1100_high_gpio_wake(unsigned int irq, unsigned int on)
+static int sa1100_high_gpio_wake(struct irq_data *d, unsigned int on)
 {
 	if (on)
-		PWER |= GPIO11_27_MASK(irq);
+		PWER |= GPIO11_27_MASK(d->irq);
 	else
-		PWER &= ~GPIO11_27_MASK(irq);
+		PWER &= ~GPIO11_27_MASK(d->irq);
 	return 0;
 }
 
 static struct irq_chip sa1100_high_gpio_chip = {
 	.name		= "GPIO-h",
-	.ack		= sa1100_high_gpio_ack,
-	.mask		= sa1100_high_gpio_mask,
-	.unmask		= sa1100_high_gpio_unmask,
-	.set_type	= sa1100_gpio_type,
-	.set_wake	= sa1100_high_gpio_wake,
+	.irq_ack	= sa1100_high_gpio_ack,
+	.irq_mask	= sa1100_high_gpio_mask,
+	.irq_unmask	= sa1100_high_gpio_unmask,
+	.irq_set_type	= sa1100_gpio_type,
+	.irq_set_wake	= sa1100_high_gpio_wake,
 };
 
 /*
  * We don't need to ACK IRQs on the SA1100 unless they're GPIOs
  * this is for internal IRQs i.e. from 11 to 31.
  */
-static void sa1100_mask_irq(unsigned int irq)
+static void sa1100_mask_irq(struct irq_data *d)
 {
-	ICMR &= ~(1 << irq);
+	ICMR &= ~(1 << d->irq);
 }
 
-static void sa1100_unmask_irq(unsigned int irq)
+static void sa1100_unmask_irq(struct irq_data *d)
 {
-	ICMR |= (1 << irq);
+	ICMR |= (1 << d->irq);
 }
 
 /*
  * Apart from GPIOs, only the RTC alarm can be a wakeup event.
  */
-static int sa1100_set_wake(unsigned int irq, unsigned int on)
+static int sa1100_set_wake(struct irq_data *d, unsigned int on)
 {
-	if (irq == IRQ_RTCAlrm) {
+	if (d->irq == IRQ_RTCAlrm) {
 		if (on)
 			PWER |= PWER_RTC;
 		else
@@ -215,10 +215,10 @@
 
 static struct irq_chip sa1100_normal_chip = {
 	.name		= "SC",
-	.ack		= sa1100_mask_irq,
-	.mask		= sa1100_mask_irq,
-	.unmask		= sa1100_unmask_irq,
-	.set_wake	= sa1100_set_wake,
+	.irq_ack	= sa1100_mask_irq,
+	.irq_mask	= sa1100_mask_irq,
+	.irq_unmask	= sa1100_unmask_irq,
+	.irq_set_wake	= sa1100_set_wake,
 };
 
 static struct resource irq_resource = {
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index c601a75..4aad01f 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -35,7 +35,7 @@
 		/*
 		 * Acknowledge the parent IRQ.
 		 */
-		desc->chip->ack(irq);
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 		/*
 		 * Read the interrupt reason register.  Let's have all
@@ -53,7 +53,7 @@
 		 * recheck the register for any pending IRQs.
 		 */
 		if (irr & (IRR_ETHERNET | IRR_USAR)) {
-			desc->chip->mask(irq);
+			desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 			/*
 			 * Ack the interrupt now to prevent re-entering
@@ -61,7 +61,7 @@
 			 * since we'll check the IRR register prior to
 			 * leaving.
 			 */
-			desc->chip->ack(irq);
+			desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 			if (irr & IRR_ETHERNET) {
 				generic_handle_irq(IRQ_NEPONSET_SMC9196);
@@ -71,7 +71,7 @@
 				generic_handle_irq(IRQ_NEPONSET_USAR);
 			}
 
-			desc->chip->unmask(irq);
+			desc->irq_data.chip->irq_unmask(&desc->irq_data);
 		}
 
 		if (irr & IRR_SA1111) {
diff --git a/arch/arm/mach-shark/irq.c b/arch/arm/mach-shark/irq.c
index c04eb6a..831fc66 100644
--- a/arch/arm/mach-shark/irq.c
+++ b/arch/arm/mach-shark/irq.c
@@ -30,35 +30,35 @@
  * These have to be protected by the irq controller spinlock
  * before being called.
  */
-static void shark_disable_8259A_irq(unsigned int irq)
+static void shark_disable_8259A_irq(struct irq_data *d)
 {
 	unsigned int mask;
-	if (irq<8) {
-	  mask = 1 << irq;
+	if (d->irq<8) {
+	  mask = 1 << d->irq;
 	  cached_irq_mask[0] |= mask;
 	  outb(cached_irq_mask[1],0xA1);
 	} else {
-	  mask = 1 << (irq-8);
+	  mask = 1 << (d->irq-8);
 	  cached_irq_mask[1] |= mask;
 	  outb(cached_irq_mask[0],0x21);
 	}
 }
 
-static void shark_enable_8259A_irq(unsigned int irq)
+static void shark_enable_8259A_irq(struct irq_data *d)
 {
 	unsigned int mask;
-	if (irq<8) {
-	  mask = ~(1 << irq);
+	if (d->irq<8) {
+	  mask = ~(1 << d->irq);
 	  cached_irq_mask[0] &= mask;
 	  outb(cached_irq_mask[0],0x21);
 	} else {
-	  mask = ~(1 << (irq-8));
+	  mask = ~(1 << (d->irq-8));
 	  cached_irq_mask[1] &= mask;
 	  outb(cached_irq_mask[1],0xA1);
 	}
 }
 
-static void shark_ack_8259A_irq(unsigned int irq){}
+static void shark_ack_8259A_irq(struct irq_data *d){}
 
 static irqreturn_t bogus_int(int irq, void *dev_id)
 {
@@ -69,10 +69,10 @@
 static struct irqaction cascade;
 
 static struct irq_chip fb_chip = {
-	.name	= "XT-PIC",
-	.ack	= shark_ack_8259A_irq,
-	.mask	= shark_disable_8259A_irq,
-	.unmask = shark_enable_8259A_irq,
+	.name		= "XT-PIC",
+	.irq_ack	= shark_ack_8259A_irq,
+	.irq_mask	= shark_disable_8259A_irq,
+	.irq_unmask	= shark_enable_8259A_irq,
 };
 
 void __init shark_init_irq(void)
diff --git a/arch/arm/mach-stmp378x/stmp378x.c b/arch/arm/mach-stmp378x/stmp378x.c
index ddd49a7..c2f9fe04 100644
--- a/arch/arm/mach-stmp378x/stmp378x.c
+++ b/arch/arm/mach-stmp378x/stmp378x.c
@@ -47,7 +47,7 @@
 /*
  * IRQ handling
  */
-static void stmp378x_ack_irq(unsigned int irq)
+static void stmp378x_ack_irq(struct irq_data *d)
 {
 	/* Tell ICOLL to release IRQ line */
 	__raw_writel(0, REGS_ICOLL_BASE + HW_ICOLL_VECTOR);
@@ -60,24 +60,24 @@
 	(void)__raw_readl(REGS_ICOLL_BASE + HW_ICOLL_STAT);
 }
 
-static void stmp378x_mask_irq(unsigned int irq)
+static void stmp378x_mask_irq(struct irq_data *d)
 {
 	/* IRQ disable */
 	stmp3xxx_clearl(BM_ICOLL_INTERRUPTn_ENABLE,
-			REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + irq * 0x10);
+			REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + d->irq * 0x10);
 }
 
-static void stmp378x_unmask_irq(unsigned int irq)
+static void stmp378x_unmask_irq(struct irq_data *d)
 {
 	/* IRQ enable */
 	stmp3xxx_setl(BM_ICOLL_INTERRUPTn_ENABLE,
-		      REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + irq * 0x10);
+		      REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + d->irq * 0x10);
 }
 
 static struct irq_chip stmp378x_chip = {
-	.ack	= stmp378x_ack_irq,
-	.mask	= stmp378x_mask_irq,
-	.unmask = stmp378x_unmask_irq,
+	.irq_ack	= stmp378x_ack_irq,
+	.irq_mask	= stmp378x_mask_irq,
+	.irq_unmask	= stmp378x_unmask_irq,
 };
 
 void __init stmp378x_init_irq(void)
diff --git a/arch/arm/mach-stmp37xx/stmp37xx.c b/arch/arm/mach-stmp37xx/stmp37xx.c
index 8c7d6fb..a9aed06 100644
--- a/arch/arm/mach-stmp37xx/stmp37xx.c
+++ b/arch/arm/mach-stmp37xx/stmp37xx.c
@@ -43,11 +43,11 @@
 /*
  * IRQ handling
  */
-static void stmp37xx_ack_irq(unsigned int irq)
+static void stmp37xx_ack_irq(struct irq_data *d)
 {
 	/* Disable IRQ */
-	stmp3xxx_clearl(0x04 << ((irq % 4) * 8),
-		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + irq / 4 * 0x10);
+	stmp3xxx_clearl(0x04 << ((d->irq % 4) * 8),
+		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + d->irq / 4 * 0x10);
 
 	/* ACK current interrupt */
 	__raw_writel(1, REGS_ICOLL_BASE + HW_ICOLL_LEVELACK);
@@ -56,24 +56,24 @@
 	(void)__raw_readl(REGS_ICOLL_BASE + HW_ICOLL_STAT);
 }
 
-static void stmp37xx_mask_irq(unsigned int irq)
+static void stmp37xx_mask_irq(struct irq_data *d)
 {
 	/* IRQ disable */
-	stmp3xxx_clearl(0x04 << ((irq % 4) * 8),
-		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + irq / 4 * 0x10);
+	stmp3xxx_clearl(0x04 << ((d->irq % 4) * 8),
+		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + d->irq / 4 * 0x10);
 }
 
-static void stmp37xx_unmask_irq(unsigned int irq)
+static void stmp37xx_unmask_irq(struct irq_data *d)
 {
 	/* IRQ enable */
-	stmp3xxx_setl(0x04 << ((irq % 4) * 8),
-		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + irq / 4 * 0x10);
+	stmp3xxx_setl(0x04 << ((d->irq % 4) * 8),
+		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + d->irq / 4 * 0x10);
 }
 
 static struct irq_chip stmp37xx_chip = {
-	.ack	= stmp37xx_ack_irq,
-	.mask	= stmp37xx_mask_irq,
-	.unmask = stmp37xx_unmask_irq,
+	.irq_ack	= stmp37xx_ack_irq,
+	.irq_mask	= stmp37xx_mask_irq,
+	.irq_unmask	= stmp37xx_unmask_irq,
 };
 
 void __init stmp37xx_init_irq(void)
diff --git a/arch/arm/mach-tcc8k/irq.c b/arch/arm/mach-tcc8k/irq.c
index 34575c4..aa9231f 100644
--- a/arch/arm/mach-tcc8k/irq.c
+++ b/arch/arm/mach-tcc8k/irq.c
@@ -18,65 +18,65 @@
 #include "common.h"
 
 /* Disable IRQ */
-static void tcc8000_mask_ack_irq0(unsigned int irq)
+static void tcc8000_mask_ack_irq0(struct irq_data *d)
 {
-	PIC0_IEN &= ~(1 << irq);
-	PIC0_CREQ |=  (1 << irq);
+	PIC0_IEN &= ~(1 << d->irq);
+	PIC0_CREQ |=  (1 << d->irq);
 }
 
-static void tcc8000_mask_ack_irq1(unsigned int irq)
+static void tcc8000_mask_ack_irq1(struct irq_data *d)
 {
-	PIC1_IEN &= ~(1 << (irq - 32));
-	PIC1_CREQ |= (1 << (irq - 32));
+	PIC1_IEN &= ~(1 << (d->irq - 32));
+	PIC1_CREQ |= (1 << (d->irq - 32));
 }
 
-static void tcc8000_mask_irq0(unsigned int irq)
+static void tcc8000_mask_irq0(struct irq_data *d)
 {
-	PIC0_IEN &= ~(1 << irq);
+	PIC0_IEN &= ~(1 << d->irq);
 }
 
-static void tcc8000_mask_irq1(unsigned int irq)
+static void tcc8000_mask_irq1(struct irq_data *d)
 {
-	PIC1_IEN &= ~(1 << (irq - 32));
+	PIC1_IEN &= ~(1 << (d->irq - 32));
 }
 
-static void tcc8000_ack_irq0(unsigned int irq)
+static void tcc8000_ack_irq0(struct irq_data *d)
 {
-	PIC0_CREQ |=  (1 << irq);
+	PIC0_CREQ |=  (1 << d->irq);
 }
 
-static void tcc8000_ack_irq1(unsigned int irq)
+static void tcc8000_ack_irq1(struct irq_data *d)
 {
-	PIC1_CREQ |= (1 << (irq - 32));
+	PIC1_CREQ |= (1 << (d->irq - 32));
 }
 
 /* Enable IRQ */
-static void tcc8000_unmask_irq0(unsigned int irq)
+static void tcc8000_unmask_irq0(struct irq_data *d)
 {
-	PIC0_IEN |= (1 << irq);
-	PIC0_INTOEN |= (1 << irq);
+	PIC0_IEN |= (1 << d->irq);
+	PIC0_INTOEN |= (1 << d->irq);
 }
 
-static void tcc8000_unmask_irq1(unsigned int irq)
+static void tcc8000_unmask_irq1(struct irq_data *d)
 {
-	PIC1_IEN |= (1 << (irq - 32));
-	PIC1_INTOEN |= (1 << (irq - 32));
+	PIC1_IEN |= (1 << (d->irq - 32));
+	PIC1_INTOEN |= (1 << (d->irq - 32));
 }
 
 static struct irq_chip tcc8000_irq_chip0 = {
 	.name		= "tcc_irq0",
-	.mask		= tcc8000_mask_irq0,
-	.ack		= tcc8000_ack_irq0,
-	.mask_ack	= tcc8000_mask_ack_irq0,
-	.unmask		= tcc8000_unmask_irq0,
+	.irq_mask	= tcc8000_mask_irq0,
+	.irq_ack	= tcc8000_ack_irq0,
+	.irq_mask_ack	= tcc8000_mask_ack_irq0,
+	.irq_unmask	= tcc8000_unmask_irq0,
 };
 
 static struct irq_chip tcc8000_irq_chip1 = {
 	.name		= "tcc_irq1",
-	.mask		= tcc8000_mask_irq1,
-	.ack		= tcc8000_ack_irq1,
-	.mask_ack	= tcc8000_mask_ack_irq1,
-	.unmask		= tcc8000_unmask_irq1,
+	.irq_mask	= tcc8000_mask_irq1,
+	.irq_ack	= tcc8000_ack_irq1,
+	.irq_mask_ack	= tcc8000_mask_ack_irq1,
+	.irq_unmask	= tcc8000_unmask_irq1,
 };
 
 void __init tcc8k_init_irq(void)
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index 0775265..bd06620 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -142,31 +142,31 @@
 	.ngpio			= TEGRA_NR_GPIOS,
 };
 
-static void tegra_gpio_irq_ack(unsigned int irq)
+static void tegra_gpio_irq_ack(struct irq_data *d)
 {
-	int gpio = irq - INT_GPIO_BASE;
+	int gpio = d->irq - INT_GPIO_BASE;
 
 	__raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
 }
 
-static void tegra_gpio_irq_mask(unsigned int irq)
+static void tegra_gpio_irq_mask(struct irq_data *d)
 {
-	int gpio = irq - INT_GPIO_BASE;
+	int gpio = d->irq - INT_GPIO_BASE;
 
 	tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
 }
 
-static void tegra_gpio_irq_unmask(unsigned int irq)
+static void tegra_gpio_irq_unmask(struct irq_data *d)
 {
-	int gpio = irq - INT_GPIO_BASE;
+	int gpio = d->irq - INT_GPIO_BASE;
 
 	tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
 }
 
-static int tegra_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	int gpio = irq - INT_GPIO_BASE;
-	struct tegra_gpio_bank *bank = get_irq_chip_data(irq);
+	int gpio = d->irq - INT_GPIO_BASE;
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	int port = GPIO_PORT(gpio);
 	int lvl_type;
 	int val;
@@ -221,7 +221,7 @@
 	int pin;
 	int unmasked = 0;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	bank = get_irq_data(irq);
 
@@ -240,7 +240,7 @@
 			 */
 			if (lvl & (0x100 << pin)) {
 				unmasked = 1;
-				desc->chip->unmask(irq);
+				desc->irq_data.chip->irq_unmask(&desc->irq_data);
 			}
 
 			generic_handle_irq(gpio_to_irq(gpio + pin));
@@ -248,7 +248,7 @@
 	}
 
 	if (!unmasked)
-		desc->chip->unmask(irq);
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 
 }
 
@@ -316,21 +316,21 @@
 	local_irq_restore(flags);
 }
 
-static int tegra_gpio_wake_enable(unsigned int irq, unsigned int enable)
+static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
 {
-	struct tegra_gpio_bank *bank = get_irq_chip_data(irq);
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	return set_irq_wake(bank->irq, enable);
 }
 #endif
 
 static struct irq_chip tegra_gpio_irq_chip = {
 	.name		= "GPIO",
-	.ack		= tegra_gpio_irq_ack,
-	.mask		= tegra_gpio_irq_mask,
-	.unmask		= tegra_gpio_irq_unmask,
-	.set_type	= tegra_gpio_irq_set_type,
+	.irq_ack	= tegra_gpio_irq_ack,
+	.irq_mask	= tegra_gpio_irq_mask,
+	.irq_unmask	= tegra_gpio_irq_unmask,
+	.irq_set_type	= tegra_gpio_irq_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= tegra_gpio_wake_enable,
+	.irq_set_wake	= tegra_gpio_wake_enable,
 #endif
 };
 
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index a5cb1ce..f329404 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -26,10 +26,10 @@
 	 * Turn off coherency
 	 */
 	"	mrc	p15, 0, %0, c1, c0, 1\n"
-	"	bic	%0, %0, %2\n"
+	"	bic	%0, %0, #0x20\n"
 	"	mcr	p15, 0, %0, c1, c0, 1\n"
 	"	mrc	p15, 0, %0, c1, c0, 0\n"
-	"	bic	%0, %0, #0x04\n"
+	"	bic	%0, %0, %2\n"
 	"	mcr	p15, 0, %0, c1, c0, 0\n"
 	  : "=&r" (v)
 	  : "r" (0), "Ir" (CR_C)
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 5407de0..de7dfad 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -46,30 +46,30 @@
 #define ICTLR_COP_IER_CLR	0x38
 #define ICTLR_COP_IEP_CLASS	0x3c
 
-static void (*gic_mask_irq)(unsigned int irq);
-static void (*gic_unmask_irq)(unsigned int irq);
+static void (*gic_mask_irq)(struct irq_data *d);
+static void (*gic_unmask_irq)(struct irq_data *d);
 
 #define irq_to_ictlr(irq) (((irq)-32) >> 5)
 static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE);
 #define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr)*0x100)
 
-static void tegra_mask(unsigned int irq)
+static void tegra_mask(struct irq_data *d)
 {
-	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(irq));
-	gic_mask_irq(irq);
-	writel(1<<(irq&31), addr+ICTLR_CPU_IER_CLR);
+	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
+	gic_mask_irq(d);
+	writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_CLR);
 }
 
-static void tegra_unmask(unsigned int irq)
+static void tegra_unmask(struct irq_data *d)
 {
-	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(irq));
-	gic_unmask_irq(irq);
-	writel(1<<(irq&31), addr+ICTLR_CPU_IER_SET);
+	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
+	gic_unmask_irq(d);
+	writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_SET);
 }
 
 #ifdef CONFIG_PM
 
-static int tegra_set_wake(unsigned int irq, unsigned int on)
+static int tegra_set_wake(struct irq_data *d, unsigned int on)
 {
 	return 0;
 }
@@ -77,10 +77,10 @@
 
 static struct irq_chip tegra_irq = {
 	.name		= "PPI",
-	.mask		= tegra_mask,
-	.unmask		= tegra_unmask,
+	.irq_mask	= tegra_mask,
+	.irq_unmask	= tegra_unmask,
 #ifdef CONFIG_PM
-	.set_wake	= tegra_set_wake,
+	.irq_set_wake	= tegra_set_wake,
 #endif
 };
 
@@ -98,11 +98,11 @@
 		 IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
 
 	gic = get_irq_chip(29);
-	gic_unmask_irq = gic->unmask;
-	gic_mask_irq = gic->mask;
-	tegra_irq.ack = gic->ack;
+	gic_unmask_irq = gic->irq_unmask;
+	gic_mask_irq = gic->irq_mask;
+	tegra_irq.irq_ack = gic->irq_ack;
 #ifdef CONFIG_SMP
-	tegra_irq.set_affinity = gic->set_affinity;
+	tegra_irq.irq_set_affinity = gic->irq_set_affinity;
 #endif
 
 	for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 13a83e4..136c32e 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -63,23 +63,25 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
-static void sic_mask_irq(unsigned int irq)
+static void sic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
+
 	writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
 }
 
-static void sic_unmask_irq(unsigned int irq)
+static void sic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
+
 	writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_SET);
 }
 
 static struct irq_chip sic_chip = {
-	.name	= "SIC",
-	.ack	= sic_mask_irq,
-	.mask	= sic_mask_irq,
-	.unmask	= sic_unmask_irq,
+	.name		= "SIC",
+	.irq_ack	= sic_mask_irq,
+	.irq_mask	= sic_mask_irq,
+	.irq_unmask	= sic_unmask_irq,
 };
 
 static void
diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c
index 0ce9d8e..9c35010 100644
--- a/arch/arm/mach-w90x900/irq.c
+++ b/arch/arm/mach-w90x900/irq.c
@@ -92,15 +92,15 @@
 	__raw_writel(regval, REG_AIC_GEN);
 }
 
-static void nuc900_irq_mask(unsigned int irq)
+static void nuc900_irq_mask(struct irq_data *d)
 {
 	struct group_irq *group_irq;
 
 	group_irq = NULL;
 
-	__raw_writel(1 << irq, REG_AIC_MDCR);
+	__raw_writel(1 << d->irq, REG_AIC_MDCR);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_GROUP0:
 		group_irq = &group_nirq0;
 		break;
@@ -143,20 +143,20 @@
  * to REG_AIC_EOSCR for ACK
  */
 
-static void nuc900_irq_ack(unsigned int irq)
+static void nuc900_irq_ack(struct irq_data *d)
 {
 	__raw_writel(0x01, REG_AIC_EOSCR);
 }
 
-static void nuc900_irq_unmask(unsigned int irq)
+static void nuc900_irq_unmask(struct irq_data *d)
 {
 	struct group_irq *group_irq;
 
 	group_irq = NULL;
 
-	__raw_writel(1 << irq, REG_AIC_MECR);
+	__raw_writel(1 << d->irq, REG_AIC_MECR);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_GROUP0:
 		group_irq = &group_nirq0;
 		break;
@@ -195,9 +195,9 @@
 }
 
 static struct irq_chip nuc900_irq_chip = {
-	.ack	   = nuc900_irq_ack,
-	.mask	   = nuc900_irq_mask,
-	.unmask	   = nuc900_irq_unmask,
+	.irq_ack	= nuc900_irq_ack,
+	.irq_mask	= nuc900_irq_mask,
+	.irq_unmask	= nuc900_irq_unmask,
 };
 
 void __init nuc900_init_irq(void)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index fcc1e62..9d30c6f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -644,7 +644,7 @@
 
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
-	depends on CPU_V7
+	depends on CPU_V7 && !CPU_V6
 	select HAVE_PROC_CPU if PROC_FS
 	default y if SMP
 	help
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b48e0a..4771dba 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -577,7 +577,7 @@
  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
- * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  *
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b49fab2..0c1172b 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -159,7 +159,9 @@
 	tstne	r1, #L_PTE_PRESENT
 	moveq	r3, #0
 
-	str	r3, [r0, #2048]!
+ ARM(	str	r3, [r0, #2048]! )
+ THUMB(	add	r0, r0, #2048 )
+ THUMB(	str	r3, [r0] )
 	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/plat-mxc/3ds_debugboard.c b/arch/arm/plat-mxc/3ds_debugboard.c
index 639c54a..c856fa3 100644
--- a/arch/arm/plat-mxc/3ds_debugboard.c
+++ b/arch/arm/plat-mxc/3ds_debugboard.c
@@ -60,7 +60,6 @@
 #define EXPIO_INT_BUTTON_B	(MXC_BOARD_IRQ_START + 4)
 
 static void __iomem *brd_io;
-static void expio_ack_irq(u32 irq);
 
 static struct resource smsc911x_resources[] = {
 	{
@@ -93,7 +92,8 @@
 	u32 int_valid;
 	u32 expio_irq;
 
-	desc->chip->mask(irq);	/* irq = gpio irq number */
+	/* irq = gpio irq number */
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	imr_val = __raw_readw(brd_io + INTR_MASK_REG);
 	int_valid = __raw_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
@@ -110,37 +110,37 @@
 			d->handle_irq(expio_irq, d);
 	}
 
-	desc->chip->ack(irq);
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 /*
  * Disable an expio pin's interrupt by setting the bit in the imr.
  * Irq is an expio virtual irq number
  */
-static void expio_mask_irq(u32 irq)
+static void expio_mask_irq(struct irq_data *d)
 {
 	u16 reg;
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 
 	reg = __raw_readw(brd_io + INTR_MASK_REG);
 	reg |= (1 << expio);
 	__raw_writew(reg, brd_io + INTR_MASK_REG);
 }
 
-static void expio_ack_irq(u32 irq)
+static void expio_ack_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 
 	__raw_writew(1 << expio, brd_io + INTR_RESET_REG);
 	__raw_writew(0, brd_io + INTR_RESET_REG);
-	expio_mask_irq(irq);
+	expio_mask_irq(d);
 }
 
-static void expio_unmask_irq(u32 irq)
+static void expio_unmask_irq(struct irq_data *d)
 {
 	u16 reg;
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 
 	reg = __raw_readw(brd_io + INTR_MASK_REG);
 	reg &= ~(1 << expio);
@@ -148,9 +148,9 @@
 }
 
 static struct irq_chip expio_irq_chip = {
-	.ack = expio_ack_irq,
-	.mask = expio_mask_irq,
-	.unmask = expio_unmask_irq,
+	.irq_ack = expio_ack_irq,
+	.irq_mask = expio_mask_irq,
+	.irq_unmask = expio_unmask_irq,
 };
 
 int __init mxc_expio_init(u32 base, u32 p_irq)
diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c
index 9a4e8a2..deb284b 100644
--- a/arch/arm/plat-mxc/avic.c
+++ b/arch/arm/plat-mxc/avic.c
@@ -89,22 +89,22 @@
 #endif /* CONFIG_FIQ */
 
 /* Disable interrupt number "irq" in the AVIC */
-static void mxc_mask_irq(unsigned int irq)
+static void mxc_mask_irq(struct irq_data *d)
 {
-	__raw_writel(irq, avic_base + AVIC_INTDISNUM);
+	__raw_writel(d->irq, avic_base + AVIC_INTDISNUM);
 }
 
 /* Enable interrupt number "irq" in the AVIC */
-static void mxc_unmask_irq(unsigned int irq)
+static void mxc_unmask_irq(struct irq_data *d)
 {
-	__raw_writel(irq, avic_base + AVIC_INTENNUM);
+	__raw_writel(d->irq, avic_base + AVIC_INTENNUM);
 }
 
 static struct mxc_irq_chip mxc_avic_chip = {
 	.base = {
-		.ack = mxc_mask_irq,
-		.mask = mxc_mask_irq,
-		.unmask = mxc_unmask_irq,
+		.irq_ack = mxc_mask_irq,
+		.irq_mask = mxc_mask_irq,
+		.irq_unmask = mxc_unmask_irq,
 	},
 #ifdef CONFIG_MXC_IRQ_PRIOR
 	.set_priority = avic_irq_set_priority,
diff --git a/arch/arm/plat-mxc/devices/Kconfig b/arch/arm/plat-mxc/devices/Kconfig
index 2537166..b9ab1d5 100644
--- a/arch/arm/plat-mxc/devices/Kconfig
+++ b/arch/arm/plat-mxc/devices/Kconfig
@@ -1,6 +1,6 @@
 config IMX_HAVE_PLATFORM_FEC
 	bool
-	default y if ARCH_MX25 || SOC_IMX27 || SOC_IMX35 || SOC_IMX51
+	default y if ARCH_MX25 || SOC_IMX27 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53
 
 config IMX_HAVE_PLATFORM_FLEXCAN
 	select HAVE_CAN_FLEXCAN if CAN
diff --git a/arch/arm/plat-mxc/devices/platform-fec.c b/arch/arm/plat-mxc/devices/platform-fec.c
index 269ec78..b50c351 100644
--- a/arch/arm/plat-mxc/devices/platform-fec.c
+++ b/arch/arm/plat-mxc/devices/platform-fec.c
@@ -36,6 +36,11 @@
 	imx_fec_data_entry_single(MX51);
 #endif
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_fec_data imx53_fec_data __initconst =
+	imx_fec_data_entry_single(MX53);
+#endif
+
 struct platform_device *__init imx_add_fec(
 		const struct imx_fec_data *data,
 		const struct fec_platform_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-imx-i2c.c b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
index 72ba880..7ba94e1 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-i2c.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
@@ -78,6 +78,15 @@
 };
 #endif /* ifdef CONFIG_SOC_IMX51 */
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_imx_i2c_data imx53_imx_i2c_data[] __initconst = {
+#define imx53_imx_i2c_data_entry(_id, _hwid)				\
+	imx_imx_i2c_data_entry(MX53, _id, _hwid, SZ_4K)
+	imx53_imx_i2c_data_entry(0, 1),
+	imx53_imx_i2c_data_entry(1, 2),
+};
+#endif /* ifdef CONFIG_SOC_IMX53 */
+
 struct platform_device *__init imx_add_imx_i2c(
 		const struct imx_imx_i2c_data *data,
 		const struct imxi2c_platform_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-imx-keypad.c b/arch/arm/plat-mxc/devices/platform-imx-keypad.c
index 40238f0..2636611 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-keypad.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-keypad.c
@@ -41,6 +41,11 @@
 	imx_imx_keypad_data_entry_single(MX35, SZ_16);
 #endif /* ifdef CONFIG_SOC_IMX35 */
 
+#ifdef CONFIG_SOC_IMX51
+const struct imx_imx_keypad_data imx51_imx_keypad_data __initconst =
+	imx_imx_keypad_data_entry_single(MX51, SZ_16);
+#endif /* ifdef CONFIG_SOC_IMX51 */
+
 struct platform_device *__init imx_add_imx_keypad(
 		const struct imx_imx_keypad_data *data,
 		const struct matrix_keymap_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-mxc_pwm.c b/arch/arm/plat-mxc/devices/platform-mxc_pwm.c
index 3d8ebdb..b0c4ae2 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc_pwm.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc_pwm.c
@@ -40,6 +40,15 @@
 	imx_mxc_pwm_data_entry_single(MX27, 0, , SZ_4K);
 #endif /* ifdef CONFIG_SOC_IMX27 */
 
+#ifdef CONFIG_SOC_IMX51
+const struct imx_mxc_pwm_data imx51_mxc_pwm_data[] __initconst = {
+#define imx51_mxc_pwm_data_entry(_id, _hwid)				\
+	imx_mxc_pwm_data_entry(MX51, _id, _hwid, SZ_16K)
+	imx51_mxc_pwm_data_entry(0, 1),
+	imx51_mxc_pwm_data_entry(1, 2),
+};
+#endif /* ifdef CONFIG_SOC_IMX51 */
+
 struct platform_device *__init imx_add_mxc_pwm(
 		const struct imx_mxc_pwm_data *data)
 {
diff --git a/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c b/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c
index b352564..6b2940b 100644
--- a/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c
@@ -53,6 +53,18 @@
 };
 #endif /* ifdef CONFIG_SOC_IMX51 */
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_sdhci_esdhc_imx_data
+imx53_sdhci_esdhc_imx_data[] __initconst = {
+#define imx53_sdhci_esdhc_imx_data_entry(_id, _hwid)			\
+	imx_sdhci_esdhc_imx_data_entry(MX53, _id, _hwid)
+	imx53_sdhci_esdhc_imx_data_entry(0, 1),
+	imx53_sdhci_esdhc_imx_data_entry(1, 2),
+	imx53_sdhci_esdhc_imx_data_entry(2, 3),
+	imx53_sdhci_esdhc_imx_data_entry(3, 4),
+};
+#endif /* ifdef CONFIG_SOC_IMX53 */
+
 struct platform_device *__init imx_add_sdhci_esdhc_imx(
 		const struct imx_sdhci_esdhc_imx_data *data,
 		const struct esdhc_platform_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index 8ea49ad..013c85f 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -81,6 +81,18 @@
 };
 #endif /* ifdef CONFIG_SOC_IMX51 */
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_spi_imx_data imx53_cspi_data __initconst =
+	imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K);
+
+const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = {
+#define imx53_ecspi_data_entry(_id, _hwid)				\
+	imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K)
+	imx53_ecspi_data_entry(0, 1),
+	imx53_ecspi_data_entry(1, 2),
+};
+#endif /* ifdef CONFIG_SOC_IMX53 */
+
 struct platform_device *__init imx_add_spi_imx(
 		const struct imx_spi_imx_data *data,
 		const struct spi_imx_master *pdata)
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
index bc2c7bc..d17b3c9 100644
--- a/arch/arm/plat-mxc/gpio.c
+++ b/arch/arm/plat-mxc/gpio.c
@@ -63,29 +63,29 @@
 	__raw_writel(l, port->base + GPIO_IMR);
 }
 
-static void gpio_ack_irq(u32 irq)
+static void gpio_ack_irq(struct irq_data *d)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	_clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f);
 }
 
-static void gpio_mask_irq(u32 irq)
+static void gpio_mask_irq(struct irq_data *d)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	_set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0);
 }
 
-static void gpio_unmask_irq(u32 irq)
+static void gpio_unmask_irq(struct irq_data *d)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	_set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1);
 }
 
 static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset);
 
-static int gpio_set_irq_type(u32 irq, u32 type)
+static int gpio_set_irq_type(struct irq_data *d, u32 type)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
 	u32 bit, val;
 	int edge;
@@ -211,9 +211,9 @@
  * @param  enable       enable as wake-up if equal to non-zero
  * @return       This function returns 0 on success.
  */
-static int gpio_set_wake_irq(u32 irq, u32 enable)
+static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	u32 gpio_idx = gpio & 0x1F;
 	struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
 
@@ -233,11 +233,11 @@
 }
 
 static struct irq_chip gpio_irq_chip = {
-	.ack = gpio_ack_irq,
-	.mask = gpio_mask_irq,
-	.unmask = gpio_unmask_irq,
-	.set_type = gpio_set_irq_type,
-	.set_wake = gpio_set_wake_irq,
+	.irq_ack = gpio_ack_irq,
+	.irq_mask = gpio_mask_irq,
+	.irq_unmask = gpio_unmask_irq,
+	.irq_set_type = gpio_set_irq_type,
+	.irq_set_wake = gpio_set_wake_irq,
 };
 
 static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx53.h b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
index 5deee01..68e11d7 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx53.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
@@ -34,7 +34,6 @@
 	IOMUX_CONFIG_ALT6,
 	IOMUX_CONFIG_ALT7,
 	IOMUX_CONFIG_GPIO, /* added to help user use GPIO mode */
-	IOMUX_CONFIG_SION = 0x1 << 4, /* LOOPBACK:MUX SION bit */
 } iomux_pin_cfg_t;
 
 /* These 2 defines are for pins that may not have a mux register, but could
@@ -135,6 +134,9 @@
 #define MX53_PAD_EIM_D16__GPIO_3_16		IOMUX_PAD(0x460, 0x118,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D17__GPIO_3_17		IOMUX_PAD(0x464, 0x11C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D18__GPIO_3_18		IOMUX_PAD(0x468, 0x120,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
+#define MX53_PAD_EIM_D16__CSPI1_SCLK		IOMUX_PAD(0x460, 0x118,IOMUX_CONFIG_ALT4, 0x79c, 3, NO_PAD_CTRL)
+#define MX53_PAD_EIM_D17__CSPI1_MISO		IOMUX_PAD(0x464, 0x11C,IOMUX_CONFIG_ALT4, 0x7a0, 3, NO_PAD_CTRL)
+#define MX53_PAD_EIM_D18__CSPI1_MOSI		IOMUX_PAD(0x468, 0x120,IOMUX_CONFIG_ALT4, 0x7a4, 3, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D19__GPIO_3_19		IOMUX_PAD(0x46C, 0x124,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D20__GPIO_3_20		IOMUX_PAD(0x470, 0x128,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D21__GPIO_3_21		IOMUX_PAD(0x474, 0x12C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/iomux-v3.h b/arch/arm/plat-mxc/include/mach/iomux-v3.h
index 2277b01..82620af 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-v3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-v3.h
@@ -105,6 +105,7 @@
 #define PAD_CTL_SRE_FAST		(1 << 0)
 #define PAD_CTL_SRE_SLOW		(0 << 0)
 
+#define IOMUX_CONFIG_SION		(0x1 << 4)
 
 #define MX51_NUM_GPIO_PORT	4
 
diff --git a/arch/arm/plat-mxc/include/mach/mx51.h b/arch/arm/plat-mxc/include/mach/mx51.h
index 873807f..1eb339e 100644
--- a/arch/arm/plat-mxc/include/mach/mx51.h
+++ b/arch/arm/plat-mxc/include/mach/mx51.h
@@ -301,8 +301,8 @@
 #define MX51_MXC_INT_GPIO4_HIGH		57
 #define MX51_MXC_INT_WDOG1		58
 #define MX51_MXC_INT_WDOG2		59
-#define MX51_MXC_INT_KPP		60
-#define MX51_MXC_INT_PWM1		61
+#define MX51_INT_KPP			60
+#define MX51_INT_PWM1			61
 #define MX51_INT_I2C1			62
 #define MX51_INT_I2C2			63
 #define MX51_MXC_INT_HS_I2C		64
@@ -335,7 +335,7 @@
 #define MX51_MXC_INT_SPDIF		91
 #define MX51_MXC_INT_TVE		92
 #define MX51_MXC_INT_FIRI		93
-#define MX51_MXC_INT_PWM2		94
+#define MX51_INT_PWM2			94
 #define MX51_MXC_INT_SLIM_EXP		95
 #define MX51_INT_SSI3			96
 #define MX51_MXC_INT_EMI_BOOT		97
diff --git a/arch/arm/plat-mxc/include/mach/mx53.h b/arch/arm/plat-mxc/include/mach/mx53.h
index 9577cdb..d7a8e52 100644
--- a/arch/arm/plat-mxc/include/mach/mx53.h
+++ b/arch/arm/plat-mxc/include/mach/mx53.h
@@ -53,13 +53,13 @@
 #define MX53_SPBA0_BASE_ADDR		0x50000000
 #define MX53_SPBA0_SIZE		SZ_1M
 
-#define MX53_MMC_SDHC1_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00004000)
-#define MX53_MMC_SDHC2_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00008000)
+#define MX53_ESDHC1_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00004000)
+#define MX53_ESDHC2_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00008000)
 #define MX53_UART3_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x0000C000)
-#define MX53_CSPI1_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00010000)
+#define MX53_ECSPI1_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00010000)
 #define MX53_SSI2_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00014000)
-#define MX53_MMC_SDHC3_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00020000)
-#define MX53_MMC_SDHC4_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00024000)
+#define MX53_ESDHC3_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00020000)
+#define MX53_ESDHC4_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00024000)
 #define MX53_SPDIF_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00028000)
 #define MX53_ASRC_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x0002C000)
 #define MX53_ATA_DMA_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00030000)
@@ -117,12 +117,12 @@
 #define MX53_ARM_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000A0000)
 #define MX53_OWIRE_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000A4000)
 #define MX53_FIRI_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000A8000)
-#define MX53_CSPI2_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000AC000)
+#define MX53_ECSPI2_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000AC000)
 #define MX53_SDMA_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000B0000)
 #define MX53_SCC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000B4000)
 #define MX53_ROMCP_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000B8000)
 #define MX53_RTIC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000BC000)
-#define MX53_CSPI3_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C0000)
+#define MX53_CSPI_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C0000)
 #define MX53_I2C2_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C4000)
 #define MX53_I2C1_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C8000)
 #define MX53_SSI1_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000CC000)
@@ -136,7 +136,7 @@
 #define MX53_MIPI_HSC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000DC000)
 #define MX53_MLB_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000E4000)
 #define MX53_SSI3_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000E8000)
-#define MX53_MXC_FEC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000EC000)
+#define MX53_FEC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000EC000)
 #define MX53_TVE_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000F0000)
 #define MX53_VPU_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000F4000)
 #define MX53_SAHARA_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000F8000)
@@ -229,10 +229,10 @@
  * Interrupt numbers
  */
 #define MX53_INT_RESV0		0
-#define MX53_INT_MMC_SDHC1	1
-#define MX53_INT_MMC_SDHC2	2
-#define MX53_INT_MMC_SDHC3	3
-#define MX53_INT_MMC_SDHC4	4
+#define MX53_INT_ESDHC1	1
+#define MX53_INT_ESDHC2	2
+#define MX53_INT_ESDHC3	3
+#define MX53_INT_ESDHC4	4
 #define MX53_INT_RESV5	5
 #define MX53_INT_SDMA	6
 #define MX53_INT_IOMUX	7
@@ -264,8 +264,8 @@
 #define MX53_INT_UART3	33
 #define MX53_INT_RESV34	34
 #define MX53_INT_RESV35	35
-#define MX53_INT_CSPI1	36
-#define MX53_INT_CSPI2	37
+#define MX53_INT_ECSPI1	36
+#define MX53_INT_ECSPI2	37
 #define MX53_INT_CSPI	38
 #define MX53_INT_GPT	39
 #define MX53_INT_EPIT1	40
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index c36f263..7a61ef8 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -57,7 +57,7 @@
 	if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
 		return -EINVAL;
 
-	if (cpu_is_mx27() || cpu_is_mx3() || cpu_is_mx25()) {
+	if (cpu_is_mx27() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
 		unsigned long long c;
 		unsigned long period_cycles, duty_cycles, prescale;
 		u32 cr;
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index e69ed8a..bc3a6be 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -69,50 +69,50 @@
 #endif
 
 /**
- * tzic_mask_irq() - Disable interrupt number "irq" in the TZIC
+ * tzic_mask_irq() - Disable interrupt source "d" in the TZIC
  *
- * @param  irq          interrupt source number
+ * @param  d            interrupt source
  */
-static void tzic_mask_irq(unsigned int irq)
+static void tzic_mask_irq(struct irq_data *d)
 {
 	int index, off;
 
-	index = irq >> 5;
-	off = irq & 0x1F;
+	index = d->irq >> 5;
+	off = d->irq & 0x1F;
 	__raw_writel(1 << off, tzic_base + TZIC_ENCLEAR0(index));
 }
 
 /**
- * tzic_unmask_irq() - Enable interrupt number "irq" in the TZIC
+ * tzic_unmask_irq() - Enable interrupt source "d" in the TZIC
  *
- * @param  irq          interrupt source number
+ * @param  d            interrupt source
  */
-static void tzic_unmask_irq(unsigned int irq)
+static void tzic_unmask_irq(struct irq_data *d)
 {
 	int index, off;
 
-	index = irq >> 5;
-	off = irq & 0x1F;
+	index = d->irq >> 5;
+	off = d->irq & 0x1F;
 	__raw_writel(1 << off, tzic_base + TZIC_ENSET0(index));
 }
 
 static unsigned int wakeup_intr[4];
 
 /**
- * tzic_set_wake_irq() - Set interrupt number "irq" in the TZIC as a wake-up source.
+ * tzic_set_wake_irq() - Set interrupt source "d" in the TZIC as a wake-up source.
  *
- * @param  irq          interrupt source number
+ * @param  d            interrupt source
  * @param  enable       enable as wake-up if equal to non-zero
 * 			disable as wake-up if equal to zero
  *
  * @return       This function returns 0 on success.
  */
-static int tzic_set_wake_irq(unsigned int irq, unsigned int enable)
+static int tzic_set_wake_irq(struct irq_data *d, unsigned int enable)
 {
 	unsigned int index, off;
 
-	index = irq >> 5;
-	off = irq & 0x1F;
+	index = d->irq >> 5;
+	off = d->irq & 0x1F;
 
 	if (index > 3)
 		return -EINVAL;
@@ -128,10 +128,10 @@
 static struct mxc_irq_chip mxc_tzic_chip = {
 	.base = {
 		.name = "MXC_TZIC",
-		.ack = tzic_mask_irq,
-		.mask = tzic_mask_irq,
-		.unmask = tzic_unmask_irq,
-		.set_wake = tzic_set_wake_irq,
+		.irq_ack = tzic_mask_irq,
+		.irq_mask = tzic_mask_irq,
+		.irq_unmask = tzic_unmask_irq,
+		.irq_set_wake = tzic_set_wake_irq,
 	},
 #ifdef CONFIG_FIQ
 	.set_irq_fiq = tzic_set_irq_fiq,
diff --git a/arch/arm/plat-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c
index eda4e3a..1e88ecb 100644
--- a/arch/arm/plat-nomadik/gpio.c
+++ b/arch/arm/plat-nomadik/gpio.c
@@ -356,13 +356,13 @@
 	return 1 << (gpio % 32);
 }
 
-static void nmk_gpio_irq_ack(unsigned int irq)
+static void nmk_gpio_irq_ack(struct irq_data *d)
 {
 	int gpio;
 	struct nmk_gpio_chip *nmk_chip;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	if (!nmk_chip)
 		return;
 	writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
@@ -401,7 +401,7 @@
 	}
 }
 
-static int nmk_gpio_irq_modify(unsigned int irq, enum nmk_gpio_irq_type which,
+static int nmk_gpio_irq_modify(struct irq_data *d, enum nmk_gpio_irq_type which,
 			       bool enable)
 {
 	int gpio;
@@ -409,8 +409,8 @@
 	unsigned long flags;
 	u32 bitmask;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	bitmask = nmk_gpio_get_bitmask(gpio);
 	if (!nmk_chip)
 		return -EINVAL;
@@ -422,24 +422,24 @@
 	return 0;
 }
 
-static void nmk_gpio_irq_mask(unsigned int irq)
+static void nmk_gpio_irq_mask(struct irq_data *d)
 {
-	nmk_gpio_irq_modify(irq, NORMAL, false);
+	nmk_gpio_irq_modify(d, NORMAL, false);
 }
 
-static void nmk_gpio_irq_unmask(unsigned int irq)
+static void nmk_gpio_irq_unmask(struct irq_data *d)
 {
-	nmk_gpio_irq_modify(irq, NORMAL, true);
+	nmk_gpio_irq_modify(d, NORMAL, true);
 }
 
-static int nmk_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct nmk_gpio_chip *nmk_chip;
 	unsigned long flags;
 	int gpio;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	if (!nmk_chip)
 		return -EINVAL;
 
@@ -457,9 +457,9 @@
 	return 0;
 }
 
-static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_to_desc(d->irq);
 	bool enabled = !(desc->status & IRQ_DISABLED);
 	bool wake = desc->wake_depth;
 	int gpio;
@@ -467,8 +467,8 @@
 	unsigned long flags;
 	u32 bitmask;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	bitmask = nmk_gpio_get_bitmask(gpio);
 	if (!nmk_chip)
 		return -EINVAL;
@@ -507,11 +507,11 @@
 
 static struct irq_chip nmk_gpio_irq_chip = {
 	.name		= "Nomadik-GPIO",
-	.ack		= nmk_gpio_irq_ack,
-	.mask		= nmk_gpio_irq_mask,
-	.unmask		= nmk_gpio_irq_unmask,
-	.set_type	= nmk_gpio_irq_set_type,
-	.set_wake	= nmk_gpio_irq_set_wake,
+	.irq_ack	= nmk_gpio_irq_ack,
+	.irq_mask	= nmk_gpio_irq_mask,
+	.irq_unmask	= nmk_gpio_irq_unmask,
+	.irq_set_type	= nmk_gpio_irq_set_type,
+	.irq_set_wake	= nmk_gpio_irq_set_wake,
 };
 
 static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -522,12 +522,12 @@
 	u32 pending;
 	unsigned int first_irq;
 
-	if (host_chip->mask_ack)
-		host_chip->mask_ack(irq);
+	if (host_chip->irq_mask_ack)
+		host_chip->irq_mask_ack(&desc->irq_data);
 	else {
-		host_chip->mask(irq);
-		if (host_chip->ack)
-			host_chip->ack(irq);
+		host_chip->irq_mask(&desc->irq_data);
+		if (host_chip->irq_ack)
+			host_chip->irq_ack(&desc->irq_data);
 	}
 
 	nmk_chip = get_irq_data(irq);
@@ -537,7 +537,7 @@
 		generic_handle_irq(gpio_irq);
 	}
 
-	host_chip->unmask(irq);
+	host_chip->irq_unmask(&desc->irq_data);
 }
 
 static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
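
The chained demux handler above now reaches the parent chip through desc->irq_data
rather than the removed desc->chip shortcut. A hedged sketch of the
mask/ack, demux, unmask flow under the new API (the child-dispatch helper is
hypothetical):

	#include <linux/irq.h>

	static void my_dispatch_children(unsigned int irq);	/* hypothetical */

	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *host_chip = desc->irq_data.chip;

		if (host_chip->irq_mask_ack)
			host_chip->irq_mask_ack(&desc->irq_data);
		else {
			host_chip->irq_mask(&desc->irq_data);
			if (host_chip->irq_ack)
				host_chip->irq_ack(&desc->irq_data);
		}

		/* hypothetical: generic_handle_irq() per pending child line */
		my_dispatch_children(irq);

		host_chip->irq_unmask(&desc->irq_data);
	}
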
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 74b62f1..4d6dd4c 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -13,6 +13,14 @@
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
 
+/*
+ * Maximum size for a single DMA descriptor.
+ * Size is limited to 16 bits.
+ * Size is in units of addr-widths (1, 2, 4 or 8 bytes).
+ * Larger transfers will be split into multiple linked descriptors.
+ */
+#define STEDMA40_MAX_SEG_SIZE 0xFFFF
+
 /* dev types for memcpy */
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define	STEDMA40_DEV_SRC_MEMORY (-1)
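
Because STEDMA40_MAX_SEG_SIZE caps one descriptor at 0xFFFF address-width units,
the number of linked descriptors a transfer needs follows directly; an
illustrative helper (not part of the patch, name hypothetical):

	#include <linux/kernel.h>

	/* illustrative only: descriptors needed for 'bytes' at a given
	 * address width (1, 2, 4 or 8 bytes per element) */
	static inline unsigned int stedma40_nr_descs(size_t bytes,
						     unsigned int addr_width)
	{
		size_t elems = bytes / addr_width;

		return DIV_ROUND_UP(elems, STEDMA40_MAX_SEG_SIZE);
	}
	/* e.g. 1 MiB at 4-byte width: 262144 elements -> 5 descriptors */
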
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 1f98e0b..971d186 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -718,7 +718,7 @@
 	case METHOD_GPIO_24XX:
 	case METHOD_GPIO_44XX:
 		set_24xx_gpio_triggering(bank, gpio, trigger);
-		break;
+		return 0;
 #endif
 	default:
 		goto bad;
@@ -729,17 +729,17 @@
 	return -EINVAL;
 }
 
-static int gpio_irq_type(unsigned irq, unsigned type)
+static int gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_bank *bank;
 	unsigned gpio;
 	int retval;
 	unsigned long flags;
 
-	if (!cpu_class_is_omap2() && irq > IH_MPUIO_BASE)
-		gpio = OMAP_MPUIO(irq - IH_MPUIO_BASE);
+	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
+		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
 	else
-		gpio = irq - IH_GPIO_BASE;
+		gpio = d->irq - IH_GPIO_BASE;
 
 	if (check_gpio(gpio) < 0)
 		return -EINVAL;
@@ -752,19 +752,21 @@
 			&& (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
 		return -EINVAL;
 
-	bank = get_irq_chip_data(irq);
+	bank = irq_data_get_irq_chip_data(d);
 	spin_lock_irqsave(&bank->lock, flags);
 	retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
 	if (retval == 0) {
-		irq_desc[irq].status &= ~IRQ_TYPE_SENSE_MASK;
-		irq_desc[irq].status |= type;
+		struct irq_desc *desc = irq_to_desc(d->irq);
+
+		desc->status &= ~IRQ_TYPE_SENSE_MASK;
+		desc->status |= type;
 	}
 	spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__set_irq_handler_unlocked(irq, handle_level_irq);
+		__set_irq_handler_unlocked(d->irq, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__set_irq_handler_unlocked(irq, handle_edge_irq);
+		__set_irq_handler_unlocked(d->irq, handle_edge_irq);
 
 	return retval;
 }
@@ -1021,15 +1023,15 @@
 }
 
 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
-static int gpio_wake_enable(unsigned int irq, unsigned int enable)
+static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
 	struct gpio_bank *bank;
 	int retval;
 
 	if (check_gpio(gpio) < 0)
 		return -ENODEV;
-	bank = get_irq_chip_data(irq);
+	bank = irq_data_get_irq_chip_data(d);
 	retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
 
 	return retval;
@@ -1142,7 +1144,7 @@
 	u32 retrigger = 0;
 	int unmasked = 0;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	bank = get_irq_data(irq);
 #ifdef CONFIG_ARCH_OMAP1
@@ -1199,7 +1201,7 @@
 		configured, we could unmask GPIO bank interrupt immediately */
 		if (!level_mask && !unmasked) {
 			unmasked = 1;
-			desc->chip->unmask(irq);
+			desc->irq_data.chip->irq_unmask(&desc->irq_data);
 		}
 
 		isr |= retrigger;
@@ -1235,41 +1237,40 @@
 	interrupt */
 exit:
 	if (!unmasked)
-		desc->chip->unmask(irq);
-
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
-static void gpio_irq_shutdown(unsigned int irq)
+static void gpio_irq_shutdown(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_reset_gpio(bank, gpio);
 }
 
-static void gpio_ack_irq(unsigned int irq)
+static void gpio_ack_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_clear_gpio_irqstatus(bank, gpio);
 }
 
-static void gpio_mask_irq(unsigned int irq)
+static void gpio_mask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_set_gpio_irqenable(bank, gpio, 0);
 	_set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
 }
 
-static void gpio_unmask_irq(unsigned int irq)
+static void gpio_unmask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	unsigned int irq_mask = 1 << get_gpio_index(gpio);
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_to_desc(d->irq);
 	u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK;
 
 	if (trigger)
@@ -1287,12 +1288,12 @@
 
 static struct irq_chip gpio_irq_chip = {
 	.name		= "GPIO",
-	.shutdown	= gpio_irq_shutdown,
-	.ack		= gpio_ack_irq,
-	.mask		= gpio_mask_irq,
-	.unmask		= gpio_unmask_irq,
-	.set_type	= gpio_irq_type,
-	.set_wake	= gpio_wake_enable,
+	.irq_shutdown	= gpio_irq_shutdown,
+	.irq_ack	= gpio_ack_irq,
+	.irq_mask	= gpio_mask_irq,
+	.irq_unmask	= gpio_unmask_irq,
+	.irq_set_type	= gpio_irq_type,
+	.irq_set_wake	= gpio_wake_enable,
 };
 
 /*---------------------------------------------------------------------*/
@@ -1301,36 +1302,36 @@
 
 /* MPUIO uses the always-on 32k clock */
 
-static void mpuio_ack_irq(unsigned int irq)
+static void mpuio_ack_irq(struct irq_data *d)
 {
 	/* The ISR is reset automatically, so do nothing here. */
 }
 
-static void mpuio_mask_irq(unsigned int irq)
+static void mpuio_mask_irq(struct irq_data *d)
 {
-	unsigned int gpio = OMAP_MPUIO(irq - IH_MPUIO_BASE);
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_set_gpio_irqenable(bank, gpio, 0);
 }
 
-static void mpuio_unmask_irq(unsigned int irq)
+static void mpuio_unmask_irq(struct irq_data *d)
 {
-	unsigned int gpio = OMAP_MPUIO(irq - IH_MPUIO_BASE);
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_set_gpio_irqenable(bank, gpio, 1);
 }
 
 static struct irq_chip mpuio_irq_chip = {
 	.name		= "MPUIO",
-	.ack		= mpuio_ack_irq,
-	.mask		= mpuio_mask_irq,
-	.unmask		= mpuio_unmask_irq,
-	.set_type	= gpio_irq_type,
+	.irq_ack	= mpuio_ack_irq,
+	.irq_mask	= mpuio_mask_irq,
+	.irq_unmask	= mpuio_unmask_irq,
+	.irq_set_type	= gpio_irq_type,
 #ifdef CONFIG_ARCH_OMAP16XX
 	/* REVISIT: assuming only 16xx supports MPUIO wake events */
-	.set_wake	= gpio_wake_enable,
+	.irq_set_wake	= gpio_wake_enable,
 #endif
 };
 
@@ -1671,7 +1672,9 @@
 
 	for (j = bank->virtual_irq_start;
 		     j < bank->virtual_irq_start + bank_width; j++) {
-		lockdep_set_class(&irq_desc[j].lock, &gpio_lock_class);
+		struct irq_desc *d = irq_to_desc(j);
+
+		lockdep_set_class(&d->lock, &gpio_lock_class);
 		set_irq_chip_data(j, bank);
 		if (bank_is_mpuio(bank))
 			set_irq_chip(j, &mpuio_irq_chip);
diff --git a/arch/arm/plat-omap/include/plat/onenand.h b/arch/arm/plat-omap/include/plat/onenand.h
index 72f433d..affe87e 100644
--- a/arch/arm/plat-omap/include/plat/onenand.h
+++ b/arch/arm/plat-omap/include/plat/onenand.h
@@ -23,6 +23,7 @@
 	int                     (*onenand_setup)(void __iomem *, int freq);
 	int			dma_channel;
 	u8			flags;
+	u8			regulator_can_sleep;
 };
 
 #define ONENAND_MAX_PARTITIONS 8
diff --git a/arch/arm/plat-omap/include/plat/voltage.h b/arch/arm/plat-omap/include/plat/voltage.h
index 0ff1233..5bd204e 100644
--- a/arch/arm/plat-omap/include/plat/voltage.h
+++ b/arch/arm/plat-omap/include/plat/voltage.h
@@ -14,6 +14,8 @@
 #ifndef __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
 #define __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
 
+#include <linux/err.h>
+
 #define VOLTSCALE_VPFORCEUPDATE		1
 #define VOLTSCALE_VCBYPASS		2
 
@@ -65,9 +67,6 @@
 	char *name;
 };
 
-/* API to get the voltagedomain pointer */
-struct voltagedomain *omap_voltage_domain_lookup(char *name);
-
 /**
  * struct omap_volt_data - Omap voltage specific data.
  * @voltage_nominal:	The possible voltage value in uV
@@ -131,16 +130,26 @@
 		struct omap_volt_pmic_info *pmic_info);
 void omap_change_voltscale_method(struct voltagedomain *voltdm,
 		int voltscale_method);
+/* API to get the voltagedomain pointer */
+struct voltagedomain *omap_voltage_domain_lookup(char *name);
+
 int omap_voltage_late_init(void);
 #else
 static inline int omap_voltage_register_pmic(struct voltagedomain *voltdm,
-		struct omap_volt_pmic_info *pmic_info) {}
+		struct omap_volt_pmic_info *pmic_info)
+{
+	return -EINVAL;
+}
 static inline  void omap_change_voltscale_method(struct voltagedomain *voltdm,
 		int voltscale_method) {}
 static inline int omap_voltage_late_init(void)
 {
 	return -EINVAL;
 }
+static inline struct voltagedomain *omap_voltage_domain_lookup(char *name)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif
 
 #endif
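
The new !CONFIG stub returns ERR_PTR(-EINVAL) so callers can use one IS_ERR()
check whether or not the voltage layer is built in; a sketch of the intended
call pattern (caller and domain name illustrative):

	#include <linux/err.h>

	static int my_voltage_init(void)	/* hypothetical caller */
	{
		struct voltagedomain *voltdm = omap_voltage_domain_lookup("mpu");

		if (IS_ERR(voltdm))
			return PTR_ERR(voltdm);	/* -EINVAL when compiled out */

		return 0;
	}
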
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index e814803..5f35223 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -232,20 +232,19 @@
  *        polarity    LEVEL          mask
  *
  ****************************************************************************/
-
-static void gpio_irq_ack(u32 irq)
+static void gpio_irq_ack(struct irq_data *d)
 {
-	int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
+	int type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
-		int pin = irq_to_gpio(irq);
+		int pin = irq_to_gpio(d->irq);
 		writel(~(1 << (pin & 31)), GPIO_EDGE_CAUSE(pin));
 	}
 }
 
-static void gpio_irq_mask(u32 irq)
+static void gpio_irq_mask(struct irq_data *d)
 {
-	int pin = irq_to_gpio(irq);
-	int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
+	int pin = irq_to_gpio(d->irq);
+	int type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
 	u32 reg = (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) ?
 		GPIO_EDGE_MASK(pin) : GPIO_LEVEL_MASK(pin);
 	u32 u = readl(reg);
@@ -253,10 +252,10 @@
 	writel(u, reg);
 }
 
-static void gpio_irq_unmask(u32 irq)
+static void gpio_irq_unmask(struct irq_data *d)
 {
-	int pin = irq_to_gpio(irq);
-	int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
+	int pin = irq_to_gpio(d->irq);
+	int type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
 	u32 reg = (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) ?
 		GPIO_EDGE_MASK(pin) : GPIO_LEVEL_MASK(pin);
 	u32 u = readl(reg);
@@ -264,20 +263,20 @@
 	writel(u, reg);
 }
 
-static int gpio_irq_set_type(u32 irq, u32 type)
+static int gpio_irq_set_type(struct irq_data *d, u32 type)
 {
-	int pin = irq_to_gpio(irq);
+	int pin = irq_to_gpio(d->irq);
 	struct irq_desc *desc;
 	u32 u;
 
 	u = readl(GPIO_IO_CONF(pin)) & (1 << (pin & 31));
 	if (!u) {
 		printk(KERN_ERR "orion gpio_irq_set_type failed "
-				"(irq %d, pin %d).\n", irq, pin);
+				"(irq %d, pin %d).\n", d->irq, pin);
 		return -EINVAL;
 	}
 
-	desc = irq_desc + irq;
+	desc = irq_desc + d->irq;
 
 	/*
 	 * Set edge/level type.
@@ -287,7 +286,7 @@
 	} else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
 		desc->handle_irq = handle_level_irq;
 	} else {
-		printk(KERN_ERR "failed to set irq=%d (type=%d)\n", irq, type);
+		printk(KERN_ERR "failed to set irq=%d (type=%d)\n", d->irq, type);
 		return -EINVAL;
 	}
 
@@ -325,10 +324,10 @@
 
 struct irq_chip orion_gpio_irq_chip = {
 	.name		= "orion_gpio_irq",
-	.ack		= gpio_irq_ack,
-	.mask		= gpio_irq_mask,
-	.unmask		= gpio_irq_unmask,
-	.set_type	= gpio_irq_set_type,
+	.irq_ack	= gpio_irq_ack,
+	.irq_mask	= gpio_irq_mask,
+	.irq_unmask	= gpio_irq_unmask,
+	.irq_set_type	= gpio_irq_set_type,
 };
 
 void orion_gpio_irq_handler(int pinoff)
diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
index 3f9d34f..7d0c7eb 100644
--- a/arch/arm/plat-orion/irq.c
+++ b/arch/arm/plat-orion/irq.c
@@ -14,31 +14,31 @@
 #include <linux/io.h>
 #include <plat/irq.h>
 
-static void orion_irq_mask(u32 irq)
+static void orion_irq_mask(struct irq_data *d)
 {
-	void __iomem *maskaddr = get_irq_chip_data(irq);
+	void __iomem *maskaddr = irq_data_get_irq_chip_data(d);
 	u32 mask;
 
 	mask = readl(maskaddr);
-	mask &= ~(1 << (irq & 31));
+	mask &= ~(1 << (d->irq & 31));
 	writel(mask, maskaddr);
 }
 
-static void orion_irq_unmask(u32 irq)
+static void orion_irq_unmask(struct irq_data *d)
 {
-	void __iomem *maskaddr = get_irq_chip_data(irq);
+	void __iomem *maskaddr = irq_data_get_irq_chip_data(d);
 	u32 mask;
 
 	mask = readl(maskaddr);
-	mask |= 1 << (irq & 31);
+	mask |= 1 << (d->irq & 31);
 	writel(mask, maskaddr);
 }
 
 static struct irq_chip orion_irq_chip = {
 	.name		= "orion_irq",
-	.mask		= orion_irq_mask,
-	.mask_ack	= orion_irq_mask,
-	.unmask		= orion_irq_unmask,
+	.irq_mask	= orion_irq_mask,
+	.irq_mask_ack	= orion_irq_mask,
+	.irq_unmask	= orion_irq_unmask,
 };
 
 void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index 98548c6..e7de6ae 100644
--- a/arch/arm/plat-pxa/gpio.c
+++ b/arch/arm/plat-pxa/gpio.c
@@ -155,10 +155,10 @@
 	__raw_writel(gfer, c->regbase + GFER_OFFSET);
 }
 
-static int pxa_gpio_irq_type(unsigned int irq, unsigned int type)
+static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
 	struct pxa_gpio_chip *c;
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	unsigned long gpdr, mask = GPIO_bit(gpio);
 
 	c = gpio_to_chip(gpio);
@@ -195,7 +195,7 @@
 
 	update_edge_detect(c);
 
-	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, irq, gpio,
+	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
 		((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
 		((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
 	return 0;
@@ -227,17 +227,17 @@
 	} while (loop);
 }
 
-static void pxa_ack_muxed_gpio(unsigned int irq)
+static void pxa_ack_muxed_gpio(struct irq_data *d)
 {
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
 
 	__raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
 }
 
-static void pxa_mask_muxed_gpio(unsigned int irq)
+static void pxa_mask_muxed_gpio(struct irq_data *d)
 {
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
 	uint32_t grer, gfer;
 
@@ -249,9 +249,9 @@
 	__raw_writel(gfer, c->regbase + GFER_OFFSET);
 }
 
-static void pxa_unmask_muxed_gpio(unsigned int irq)
+static void pxa_unmask_muxed_gpio(struct irq_data *d)
 {
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
 
 	c->irq_mask |= GPIO_bit(gpio);
@@ -260,10 +260,10 @@
 
 static struct irq_chip pxa_muxed_gpio_chip = {
 	.name		= "GPIO",
-	.ack		= pxa_ack_muxed_gpio,
-	.mask		= pxa_mask_muxed_gpio,
-	.unmask		= pxa_unmask_muxed_gpio,
-	.set_type	= pxa_gpio_irq_type,
+	.irq_ack	= pxa_ack_muxed_gpio,
+	.irq_mask	= pxa_mask_muxed_gpio,
+	.irq_unmask	= pxa_unmask_muxed_gpio,
+	.irq_set_type	= pxa_gpio_irq_type,
 };
 
 void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
@@ -291,7 +291,7 @@
 
 	/* Install handler for GPIO>=2 edge detect interrupts */
 	set_irq_chained_handler(mux_irq, pxa_gpio_demux_handler);
-	pxa_muxed_gpio_chip.set_wake = fn;
+	pxa_muxed_gpio_chip.irq_set_wake = fn;
 }
 
 #ifdef CONFIG_PM
diff --git a/arch/arm/plat-pxa/include/plat/gpio.h b/arch/arm/plat-pxa/include/plat/gpio.h
index 44248cb..1ddd2b9 100644
--- a/arch/arm/plat-pxa/include/plat/gpio.h
+++ b/arch/arm/plat-pxa/include/plat/gpio.h
@@ -1,6 +1,8 @@
 #ifndef __PLAT_GPIO_H
 #define __PLAT_GPIO_H
 
+struct irq_data;
+
 /*
  * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
  * one set of registers. The register offsets are organized below:
@@ -56,7 +58,7 @@
  */
 extern int pxa_last_gpio;
 
-typedef int (*set_wake_t)(unsigned int irq, unsigned int on);
+typedef int (*set_wake_t)(struct irq_data *d, unsigned int on);
 
 extern void pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn);
 #endif /* __PLAT_GPIO_H */
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 8a42bc4..268f3ed 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -194,7 +194,6 @@
 	memcpy(&s3c2410ts_info, hard_s3c2410ts_info, sizeof(struct s3c2410_ts_mach_info));
 	s3c_device_ts.dev.platform_data = &s3c2410ts_info;
 }
-EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
 
 /* USB Device (Gadget)*/
 
diff --git a/arch/arm/plat-s3c24xx/include/plat/irq.h b/arch/arm/plat-s3c24xx/include/plat/irq.h
index 69e1be8..ec087d6 100644
--- a/arch/arm/plat-s3c24xx/include/plat/irq.h
+++ b/arch/arm/plat-s3c24xx/include/plat/irq.h
@@ -107,9 +107,9 @@
 /* exported for use in arch/arm/mach-s3c2410 */
 
 #ifdef CONFIG_PM
-extern int s3c_irq_wake(unsigned int irqno, unsigned int state);
+extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
 #else
 #define s3c_irq_wake NULL
 #endif
 
-extern int s3c_irqext_type(unsigned int irq, unsigned int type);
+extern int s3c_irqext_type(struct irq_data *d, unsigned int type);
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/plat-s3c24xx/irq-pm.c
index ea8dea3..c3624d8 100644
--- a/arch/arm/plat-s3c24xx/irq-pm.c
+++ b/arch/arm/plat-s3c24xx/irq-pm.c
@@ -15,11 +15,14 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/sysdev.h>
+#include <linux/irq.h>
 
 #include <plat/cpu.h>
 #include <plat/pm.h>
 #include <plat/irq.h>
 
+#include <asm/irq.h>
+
 /* state for IRQs over sleep */
 
 /* default is to allow for EINT0..EINT15, and IRQ_RTC as wakeup sources
@@ -30,15 +33,15 @@
 unsigned long s3c_irqwake_intallow	= 1L << (IRQ_RTC - IRQ_EINT0) | 0xfL;
 unsigned long s3c_irqwake_eintallow	= 0x0000fff0L;
 
-int s3c_irq_wake(unsigned int irqno, unsigned int state)
+int s3c_irq_wake(struct irq_data *data, unsigned int state)
 {
-	unsigned long irqbit = 1 << (irqno - IRQ_EINT0);
+	unsigned long irqbit = 1 << (data->irq - IRQ_EINT0);
 
 	if (!(s3c_irqwake_intallow & irqbit))
 		return -ENOENT;
 
 	printk(KERN_INFO "wake %s for irq %d\n",
-	       state ? "enabled" : "disabled", irqno);
+	       state ? "enabled" : "disabled", data->irq);
 
 	if (!state)
 		s3c_irqwake_intmask |= irqbit;
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index ad0d44e..4434cb5 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -34,30 +34,29 @@
 #include <plat/irq.h>
 
 static void
-s3c_irq_mask(unsigned int irqno)
+s3c_irq_mask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq - IRQ_EINT0;
 	unsigned long mask;
 
-	irqno -= IRQ_EINT0;
-
 	mask = __raw_readl(S3C2410_INTMSK);
 	mask |= 1UL << irqno;
 	__raw_writel(mask, S3C2410_INTMSK);
 }
 
 static inline void
-s3c_irq_ack(unsigned int irqno)
+s3c_irq_ack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 
 	__raw_writel(bitval, S3C2410_SRCPND);
 	__raw_writel(bitval, S3C2410_INTPND);
 }
 
 static inline void
-s3c_irq_maskack(unsigned int irqno)
+s3c_irq_maskack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2410_INTMSK);
@@ -69,8 +68,9 @@
 
 
 static void
-s3c_irq_unmask(unsigned int irqno)
+s3c_irq_unmask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq;
 	unsigned long mask;
 
 	if (irqno != IRQ_TIMER4 && irqno != IRQ_EINT8t23)
@@ -85,40 +85,39 @@
 
 struct irq_chip s3c_irq_level_chip = {
 	.name		= "s3c-level",
-	.ack		= s3c_irq_maskack,
-	.mask		= s3c_irq_mask,
-	.unmask		= s3c_irq_unmask,
-	.set_wake	= s3c_irq_wake
+	.irq_ack	= s3c_irq_maskack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake
 };
 
 struct irq_chip s3c_irq_chip = {
 	.name		= "s3c",
-	.ack		= s3c_irq_ack,
-	.mask		= s3c_irq_mask,
-	.unmask		= s3c_irq_unmask,
-	.set_wake	= s3c_irq_wake
+	.irq_ack	= s3c_irq_ack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake
 };
 
 static void
-s3c_irqext_mask(unsigned int irqno)
+s3c_irqext_mask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq - EXTINT_OFF;
 	unsigned long mask;
 
-	irqno -= EXTINT_OFF;
-
 	mask = __raw_readl(S3C24XX_EINTMASK);
 	mask |= ( 1UL << irqno);
 	__raw_writel(mask, S3C24XX_EINTMASK);
 }
 
 static void
-s3c_irqext_ack(unsigned int irqno)
+s3c_irqext_ack(struct irq_data *data)
 {
 	unsigned long req;
 	unsigned long bit;
 	unsigned long mask;
 
-	bit = 1UL << (irqno - EXTINT_OFF);
+	bit = 1UL << (data->irq - EXTINT_OFF);
 
 	mask = __raw_readl(S3C24XX_EINTMASK);
 
@@ -129,64 +128,57 @@
 
 	/* not sure if we should be acking the parent irq... */
 
-	if (irqno <= IRQ_EINT7 ) {
+	if (data->irq <= IRQ_EINT7) {
 		if ((req & 0xf0) == 0)
-			s3c_irq_ack(IRQ_EINT4t7);
+			s3c_irq_ack(irq_get_irq_data(IRQ_EINT4t7));
 	} else {
 		if ((req >> 8) == 0)
-			s3c_irq_ack(IRQ_EINT8t23);
+			s3c_irq_ack(irq_get_irq_data(IRQ_EINT8t23));
 	}
 }
 
 static void
-s3c_irqext_unmask(unsigned int irqno)
+s3c_irqext_unmask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq - EXTINT_OFF;
 	unsigned long mask;
 
-	irqno -= EXTINT_OFF;
-
 	mask = __raw_readl(S3C24XX_EINTMASK);
-	mask &= ~( 1UL << irqno);
+	mask &= ~(1UL << irqno);
 	__raw_writel(mask, S3C24XX_EINTMASK);
 }
 
 int
-s3c_irqext_type(unsigned int irq, unsigned int type)
+s3c_irqext_type(struct irq_data *data, unsigned int type)
 {
 	void __iomem *extint_reg;
 	void __iomem *gpcon_reg;
 	unsigned long gpcon_offset, extint_offset;
 	unsigned long newvalue = 0, value;
 
-	if ((irq >= IRQ_EINT0) && (irq <= IRQ_EINT3))
-	{
+	if ((data->irq >= IRQ_EINT0) && (data->irq <= IRQ_EINT3)) {
 		gpcon_reg = S3C2410_GPFCON;
 		extint_reg = S3C24XX_EXTINT0;
-		gpcon_offset = (irq - IRQ_EINT0) * 2;
-		extint_offset = (irq - IRQ_EINT0) * 4;
-	}
-	else if ((irq >= IRQ_EINT4) && (irq <= IRQ_EINT7))
-	{
+		gpcon_offset = (data->irq - IRQ_EINT0) * 2;
+		extint_offset = (data->irq - IRQ_EINT0) * 4;
+	} else if ((data->irq >= IRQ_EINT4) && (data->irq <= IRQ_EINT7)) {
 		gpcon_reg = S3C2410_GPFCON;
 		extint_reg = S3C24XX_EXTINT0;
-		gpcon_offset = (irq - (EXTINT_OFF)) * 2;
-		extint_offset = (irq - (EXTINT_OFF)) * 4;
-	}
-	else if ((irq >= IRQ_EINT8) && (irq <= IRQ_EINT15))
-	{
+		gpcon_offset = (data->irq - (EXTINT_OFF)) * 2;
+		extint_offset = (data->irq - (EXTINT_OFF)) * 4;
+	} else if ((data->irq >= IRQ_EINT8) && (data->irq <= IRQ_EINT15)) {
 		gpcon_reg = S3C2410_GPGCON;
 		extint_reg = S3C24XX_EXTINT1;
-		gpcon_offset = (irq - IRQ_EINT8) * 2;
-		extint_offset = (irq - IRQ_EINT8) * 4;
-	}
-	else if ((irq >= IRQ_EINT16) && (irq <= IRQ_EINT23))
-	{
+		gpcon_offset = (data->irq - IRQ_EINT8) * 2;
+		extint_offset = (data->irq - IRQ_EINT8) * 4;
+	} else if ((data->irq >= IRQ_EINT16) && (data->irq <= IRQ_EINT23)) {
 		gpcon_reg = S3C2410_GPGCON;
 		extint_reg = S3C24XX_EXTINT2;
-		gpcon_offset = (irq - IRQ_EINT8) * 2;
-		extint_offset = (irq - IRQ_EINT16) * 4;
-	} else
+		gpcon_offset = (data->irq - IRQ_EINT8) * 2;
+		extint_offset = (data->irq - IRQ_EINT16) * 4;
+	} else {
 		return -1;
+	}
 
 	/* Set the GPIO to external interrupt mode */
 	value = __raw_readl(gpcon_reg);
@@ -234,20 +226,20 @@
 
 static struct irq_chip s3c_irqext_chip = {
 	.name		= "s3c-ext",
-	.mask		= s3c_irqext_mask,
-	.unmask		= s3c_irqext_unmask,
-	.ack		= s3c_irqext_ack,
-	.set_type	= s3c_irqext_type,
-	.set_wake	= s3c_irqext_wake
+	.irq_mask	= s3c_irqext_mask,
+	.irq_unmask	= s3c_irqext_unmask,
+	.irq_ack	= s3c_irqext_ack,
+	.irq_set_type	= s3c_irqext_type,
+	.irq_set_wake	= s3c_irqext_wake
 };
 
 static struct irq_chip s3c_irq_eint0t4 = {
 	.name		= "s3c-ext0",
-	.ack		= s3c_irq_ack,
-	.mask		= s3c_irq_mask,
-	.unmask		= s3c_irq_unmask,
-	.set_wake	= s3c_irq_wake,
-	.set_type	= s3c_irqext_type,
+	.irq_ack	= s3c_irq_ack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake,
+	.irq_set_type	= s3c_irqext_type,
 };
 
 /* mask values for the parent registers for each of the interrupt types */
@@ -261,109 +253,109 @@
 /* UART0 */
 
 static void
-s3c_irq_uart0_mask(unsigned int irqno)
+s3c_irq_uart0_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART0, 7);
+	s3c_irqsub_mask(data->irq, INTMSK_UART0, 7);
 }
 
 static void
-s3c_irq_uart0_unmask(unsigned int irqno)
+s3c_irq_uart0_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART0);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART0);
 }
 
 static void
-s3c_irq_uart0_ack(unsigned int irqno)
+s3c_irq_uart0_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART0, 7);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART0, 7);
 }
 
 static struct irq_chip s3c_irq_uart0 = {
 	.name		= "s3c-uart0",
-	.mask		= s3c_irq_uart0_mask,
-	.unmask		= s3c_irq_uart0_unmask,
-	.ack		= s3c_irq_uart0_ack,
+	.irq_mask	= s3c_irq_uart0_mask,
+	.irq_unmask	= s3c_irq_uart0_unmask,
+	.irq_ack	= s3c_irq_uart0_ack,
 };
 
 /* UART1 */
 
 static void
-s3c_irq_uart1_mask(unsigned int irqno)
+s3c_irq_uart1_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART1, 7 << 3);
+	s3c_irqsub_mask(data->irq, INTMSK_UART1, 7 << 3);
 }
 
 static void
-s3c_irq_uart1_unmask(unsigned int irqno)
+s3c_irq_uart1_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART1);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART1);
 }
 
 static void
-s3c_irq_uart1_ack(unsigned int irqno)
+s3c_irq_uart1_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART1, 7 << 3);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART1, 7 << 3);
 }
 
 static struct irq_chip s3c_irq_uart1 = {
 	.name		= "s3c-uart1",
-	.mask		= s3c_irq_uart1_mask,
-	.unmask		= s3c_irq_uart1_unmask,
-	.ack		= s3c_irq_uart1_ack,
+	.irq_mask	= s3c_irq_uart1_mask,
+	.irq_unmask	= s3c_irq_uart1_unmask,
+	.irq_ack	= s3c_irq_uart1_ack,
 };
 
 /* UART2 */
 
 static void
-s3c_irq_uart2_mask(unsigned int irqno)
+s3c_irq_uart2_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART2, 7 << 6);
+	s3c_irqsub_mask(data->irq, INTMSK_UART2, 7 << 6);
 }
 
 static void
-s3c_irq_uart2_unmask(unsigned int irqno)
+s3c_irq_uart2_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART2);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART2);
 }
 
 static void
-s3c_irq_uart2_ack(unsigned int irqno)
+s3c_irq_uart2_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART2, 7 << 6);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART2, 7 << 6);
 }
 
 static struct irq_chip s3c_irq_uart2 = {
 	.name		= "s3c-uart2",
-	.mask		= s3c_irq_uart2_mask,
-	.unmask		= s3c_irq_uart2_unmask,
-	.ack		= s3c_irq_uart2_ack,
+	.irq_mask	= s3c_irq_uart2_mask,
+	.irq_unmask	= s3c_irq_uart2_unmask,
+	.irq_ack	= s3c_irq_uart2_ack,
 };
 
 /* ADC and Touchscreen */
 
 static void
-s3c_irq_adc_mask(unsigned int irqno)
+s3c_irq_adc_mask(struct irq_data *d)
 {
-	s3c_irqsub_mask(irqno, INTMSK_ADCPARENT, 3 << 9);
+	s3c_irqsub_mask(d->irq, INTMSK_ADCPARENT, 3 << 9);
 }
 
 static void
-s3c_irq_adc_unmask(unsigned int irqno)
+s3c_irq_adc_unmask(struct irq_data *d)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_ADCPARENT);
+	s3c_irqsub_unmask(d->irq, INTMSK_ADCPARENT);
 }
 
 static void
-s3c_irq_adc_ack(unsigned int irqno)
+s3c_irq_adc_ack(struct irq_data *d)
 {
-	s3c_irqsub_ack(irqno, INTMSK_ADCPARENT, 3 << 9);
+	s3c_irqsub_ack(d->irq, INTMSK_ADCPARENT, 3 << 9);
 }
 
 static struct irq_chip s3c_irq_adc = {
 	.name		= "s3c-adc",
-	.mask		= s3c_irq_adc_mask,
-	.unmask		= s3c_irq_adc_unmask,
-	.ack		= s3c_irq_adc_ack,
+	.irq_mask	= s3c_irq_adc_mask,
+	.irq_unmask	= s3c_irq_adc_unmask,
+	.irq_ack	= s3c_irq_adc_ack,
 };
 
 /* irq demux for adc */
diff --git a/arch/arm/plat-s3c24xx/s3c2443-clock.c b/arch/arm/plat-s3c24xx/s3c2443-clock.c
index 461f070..82f2d4a 100644
--- a/arch/arm/plat-s3c24xx/s3c2443-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2443-clock.c
@@ -271,7 +271,7 @@
 		.ctrlbit	= S3C2443_HCLKCON_DMA5,
 	}, {
 		.name		= "hsmmc",
-		.id		= 0,
+		.id		= 1,
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_HSMMC,
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 65dbfa8..deb3995 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -56,3 +56,29 @@
 	bool
 	help
 	  Compile in platform device definition for OneNAND controller
+
+config S5P_DEV_CSIS0
+	bool
+	help
+	  Compile in platform device definitions for MIPI-CSIS channel 0
+
+config S5P_DEV_CSIS1
+	bool
+	help
+	  Compile in platform device definitions for MIPI-CSIS channel 1
+
+menuconfig S5P_SYSMMU
+	bool "SYSMMU support"
+	depends on ARCH_S5PV310
+	help
+	  This is a System MMU driver for Samsung ARM-based SoCs.
+
+if S5P_SYSMMU
+
+config S5P_SYSMMU_DEBUG
+	bool "Enables debug messages"
+	depends on S5P_SYSMMU
+	help
+	  This enables SYSMMU driver debug massages.
+
+endif
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
index de65238..92efe1a 100644
--- a/arch/arm/plat-s5p/Makefile
+++ b/arch/arm/plat-s5p/Makefile
@@ -28,3 +28,6 @@
 obj-$(CONFIG_S5P_DEV_FIMC1)	+= dev-fimc1.o
 obj-$(CONFIG_S5P_DEV_FIMC2)	+= dev-fimc2.o
 obj-$(CONFIG_S5P_DEV_ONENAND)	+= dev-onenand.o
+obj-$(CONFIG_S5P_DEV_CSIS0)	+= dev-csis0.o
+obj-$(CONFIG_S5P_DEV_CSIS1)	+= dev-csis1.o
+obj-$(CONFIG_S5P_SYSMMU)	+= sysmmu.o
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index 74f7f5a..047d31c 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -108,6 +108,11 @@
 		.pfn		= __phys_to_pfn(S3C_PA_WDT),
 		.length		= SZ_4K,
 		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_SROMC,
+		.pfn		= __phys_to_pfn(S5P_PA_SROMC),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
 	},
 };
 
diff --git a/arch/arm/plat-s5p/dev-csis0.c b/arch/arm/plat-s5p/dev-csis0.c
new file mode 100644
index 0000000..dfab1c8
--- /dev/null
+++ b/arch/arm/plat-s5p/dev-csis0.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * S5P series device definition for MIPI-CSIS channel 0
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <mach/map.h>
+
+static struct resource s5p_mipi_csis0_resource[] = {
+	[0] = {
+		.start = S5P_PA_MIPI_CSIS0,
+		.end   = S5P_PA_MIPI_CSIS0 + SZ_4K - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_MIPI_CSIS0,
+		.end   = IRQ_MIPI_CSIS0,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+struct platform_device s5p_device_mipi_csis0 = {
+	.name		  = "s5p-mipi-csis",
+	.id		  = 0,
+	.num_resources	  = ARRAY_SIZE(s5p_mipi_csis0_resource),
+	.resource	  = s5p_mipi_csis0_resource,
+};
diff --git a/arch/arm/plat-s5p/dev-csis1.c b/arch/arm/plat-s5p/dev-csis1.c
new file mode 100644
index 0000000..e3053f2
--- /dev/null
+++ b/arch/arm/plat-s5p/dev-csis1.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * S5P series device definition for MIPI-CSIS channel 1
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <mach/map.h>
+
+static struct resource s5p_mipi_csis1_resource[] = {
+	[0] = {
+		.start = S5P_PA_MIPI_CSIS1,
+		.end   = S5P_PA_MIPI_CSIS1 + SZ_4K - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_MIPI_CSIS1,
+		.end   = IRQ_MIPI_CSIS1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+struct platform_device s5p_device_mipi_csis1 = {
+	.name		  = "s5p-mipi-csis",
+	.id		  = 1,
+	.num_resources	  = ARRAY_SIZE(s5p_mipi_csis1_resource),
+	.resource	  = s5p_mipi_csis1_resource,
+};
diff --git a/arch/arm/plat-s5p/include/plat/csis.h b/arch/arm/plat-s5p/include/plat/csis.h
new file mode 100644
index 0000000..51e308c
--- /dev/null
+++ b/arch/arm/plat-s5p/include/plat/csis.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * S5P series MIPI CSI slave device support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef PLAT_S5P_CSIS_H_
+#define PLAT_S5P_CSIS_H_ __FILE__
+
+/**
+ * struct s5p_platform_mipi_csis - platform data for MIPI-CSIS
+ * @clk_rate: bus clock frequency
+ * @lanes: number of data lanes used
+ * @alignment: data alignment in bits
+ * @hs_settle: HS-RX settle time
+ */
+struct s5p_platform_mipi_csis {
+	unsigned long clk_rate;
+	u8 lanes;
+	u8 alignment;
+	u8 hs_settle;
+};
+
+#endif /* PLAT_S5P_CSIS_H_ */
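
The platform data above is meant to be filled in by board code and attached
to the matching CSIS platform device. A hedged sketch of that wiring; the
numbers and the board_camera_init() name are illustrative assumptions, not
values from this series:

	#include <plat/csis.h>
	#include <plat/devs.h>

	/* example values only; real boards derive these from the sensor */
	static struct s5p_platform_mipi_csis board_csis0_pdata = {
		.clk_rate	= 166000000UL,	/* bus clock in Hz */
		.lanes		= 2,
		.alignment	= 32,
		.hs_settle	= 12,
	};

	static void __init board_camera_init(void)
	{
		s5p_device_mipi_csis0.dev.platform_data = &board_csis0_pdata;
		platform_device_register(&s5p_device_mipi_csis0);
	}
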
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index fef353d..d973d39 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -15,6 +15,7 @@
 
 #define S5P_VA_CHIPID		S3C_ADDR(0x02000000)
 #define S5P_VA_CMU		S3C_ADDR(0x02100000)
+#define S5P_VA_PMU		S3C_ADDR(0x02180000)
 #define S5P_VA_GPIO		S3C_ADDR(0x02200000)
 #define S5P_VA_GPIO1		S5P_VA_GPIO
 #define S5P_VA_GPIO2		S3C_ADDR(0x02240000)
diff --git a/arch/arm/plat-s5p/include/plat/regs-srom.h b/arch/arm/plat-s5p/include/plat/regs-srom.h
new file mode 100644
index 0000000..f121ab5
--- /dev/null
+++ b/arch/arm/plat-s5p/include/plat/regs-srom.h
@@ -0,0 +1,54 @@
+/* linux/arch/arm/plat-s5p/include/plat/regs-srom.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5P SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_PLAT_S5P_REGS_SROM_H
+#define __ASM_PLAT_S5P_REGS_SROM_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_SROMREG(x)		(S5P_VA_SROMC + (x))
+
+#define S5P_SROM_BW		S5P_SROMREG(0x0)
+#define S5P_SROM_BC0		S5P_SROMREG(0x4)
+#define S5P_SROM_BC1		S5P_SROMREG(0x8)
+#define S5P_SROM_BC2		S5P_SROMREG(0xc)
+#define S5P_SROM_BC3		S5P_SROMREG(0x10)
+#define S5P_SROM_BC4		S5P_SROMREG(0x14)
+#define S5P_SROM_BC5		S5P_SROMREG(0x18)
+
+/* register BW holds one 4-bit packed setting per chip select, NCS0 - NCS5 */
+
+#define S5P_SROM_BW__DATAWIDTH__SHIFT		0
+#define S5P_SROM_BW__ADDRMODE__SHIFT		1
+#define S5P_SROM_BW__WAITENABLE__SHIFT		2
+#define S5P_SROM_BW__BYTEENABLE__SHIFT		3
+
+#define S5P_SROM_BW__CS_MASK			0xf
+
+#define S5P_SROM_BW__NCS0__SHIFT		0
+#define S5P_SROM_BW__NCS1__SHIFT		4
+#define S5P_SROM_BW__NCS2__SHIFT		8
+#define S5P_SROM_BW__NCS3__SHIFT		12
+#define S5P_SROM_BW__NCS4__SHIFT		16
+#define S5P_SROM_BW__NCS5__SHIFT		20
+
+/* the same BCX field layout applies to each of BC0 - BC5 */
+
+#define S5P_SROM_BCX__PMC__SHIFT		0
+#define S5P_SROM_BCX__TACP__SHIFT		4
+#define S5P_SROM_BCX__TCAH__SHIFT		8
+#define S5P_SROM_BCX__TCOH__SHIFT		12
+#define S5P_SROM_BCX__TACC__SHIFT		16
+#define S5P_SROM_BCX__TCOS__SHIFT		24
+#define S5P_SROM_BCX__TACS__SHIFT		28
+
+#endif /* __ASM_PLAT_S5P_REGS_SROM_H */
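
Each chip select occupies one 4-bit field in the BW register, so
reconfiguring a bank is a read-modify-write of its nibble using the shifts
above. A sketch, assuming a board that wants a 16-bit, wait-enabled nCS1
window (the bit choices are examples only):

	u32 bw = __raw_readl(S5P_SROM_BW);

	/* clear the 4-bit field for nCS1, then set data width and wait */
	bw &= ~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
	bw |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
	       (1 << S5P_SROM_BW__WAITENABLE__SHIFT)) << S5P_SROM_BW__NCS1__SHIFT;

	__raw_writel(bw, S5P_SROM_BW);
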
diff --git a/arch/arm/plat-s5p/include/plat/sysmmu.h b/arch/arm/plat-s5p/include/plat/sysmmu.h
new file mode 100644
index 0000000..db298fc
--- /dev/null
+++ b/arch/arm/plat-s5p/include/plat/sysmmu.h
@@ -0,0 +1,23 @@
+/* linux/arch/arm/plat-s5p/include/plat/sysmmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Samsung sysmmu driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_PLAT_S5P_SYSMMU_H
+#define __ASM_PLAT_S5P_SYSMMU_H __FILE__
+
+/* debug macro */
+#ifdef CONFIG_S5P_SYSMMU_DEBUG
+#define sysmmu_debug(fmt, arg...)	printk(KERN_INFO "[%s] " fmt, __func__, ## arg)
+#else
+#define sysmmu_debug(fmt, arg...)	do { } while (0)
+#endif
+
+#endif /* __ASM_PLAT_S5P_SYSMMU_H */
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
index 752f1a6..225aa25 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -28,39 +28,40 @@
 #include <plat/gpio-cfg.h>
 #include <mach/regs-gpio.h>
 
-static inline void s5p_irq_eint_mask(unsigned int irq)
+static inline void s5p_irq_eint_mask(struct irq_data *data)
 {
 	u32 mask;
 
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask |= eint_irq_to_bit(irq);
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask |= eint_irq_to_bit(data->irq);
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 }
 
-static void s5p_irq_eint_unmask(unsigned int irq)
+static void s5p_irq_eint_unmask(struct irq_data *data)
 {
 	u32 mask;
 
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask &= ~(eint_irq_to_bit(irq));
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask &= ~(eint_irq_to_bit(data->irq));
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 }
 
-static inline void s5p_irq_eint_ack(unsigned int irq)
+static inline void s5p_irq_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+	__raw_writel(eint_irq_to_bit(data->irq),
+		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
 }
 
-static void s5p_irq_eint_maskack(unsigned int irq)
+static void s5p_irq_eint_maskack(struct irq_data *data)
 {
 	/* compiler should in-line these */
-	s5p_irq_eint_mask(irq);
-	s5p_irq_eint_ack(irq);
+	s5p_irq_eint_mask(data);
+	s5p_irq_eint_ack(data);
 }
 
-static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type)
+static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
 {
-	int offs = EINT_OFFSET(irq);
+	int offs = EINT_OFFSET(data->irq);
 	int shift;
 	u32 ctrl, mask;
 	u32 newvalue = 0;
@@ -94,10 +95,10 @@
 	shift = (offs & 0x7) * 4;
 	mask = 0x7 << shift;
 
-	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(irq)));
+	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
 	ctrl &= ~mask;
 	ctrl |= newvalue << shift;
-	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(irq)));
+	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
 
 	if ((0 <= offs) && (offs < 8))
 		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
@@ -119,13 +120,13 @@
 
 static struct irq_chip s5p_irq_eint = {
 	.name		= "s5p-eint",
-	.mask		= s5p_irq_eint_mask,
-	.unmask		= s5p_irq_eint_unmask,
-	.mask_ack	= s5p_irq_eint_maskack,
-	.ack		= s5p_irq_eint_ack,
-	.set_type	= s5p_irq_eint_set_type,
+	.irq_mask	= s5p_irq_eint_mask,
+	.irq_unmask	= s5p_irq_eint_unmask,
+	.irq_mask_ack	= s5p_irq_eint_maskack,
+	.irq_ack	= s5p_irq_eint_ack,
+	.irq_set_type	= s5p_irq_eint_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= s3c_irqext_wake,
+	.irq_set_wake	= s3c_irqext_wake,
 #endif
 };
 
@@ -159,42 +160,43 @@
 	s5p_irq_demux_eint(IRQ_EINT(24));
 }
 
-static inline void s5p_irq_vic_eint_mask(unsigned int irq)
+static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	void __iomem *base = irq_data_get_irq_chip_data(data);
 
-	s5p_irq_eint_mask(irq);
-	writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE_CLEAR);
+	s5p_irq_eint_mask(data);
+	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
 }
 
-static void s5p_irq_vic_eint_unmask(unsigned int irq)
+static void s5p_irq_vic_eint_unmask(struct irq_data *data)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	void __iomem *base = irq_data_get_irq_chip_data(data);
 
-	s5p_irq_eint_unmask(irq);
-	writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE);
+	s5p_irq_eint_unmask(data);
+	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
 }
 
-static inline void s5p_irq_vic_eint_ack(unsigned int irq)
+static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+	__raw_writel(eint_irq_to_bit(data->irq),
+		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
 }
 
-static void s5p_irq_vic_eint_maskack(unsigned int irq)
+static void s5p_irq_vic_eint_maskack(struct irq_data *data)
 {
-	s5p_irq_vic_eint_mask(irq);
-	s5p_irq_vic_eint_ack(irq);
+	s5p_irq_vic_eint_mask(data);
+	s5p_irq_vic_eint_ack(data);
 }
 
 static struct irq_chip s5p_irq_vic_eint = {
 	.name		= "s5p_vic_eint",
-	.mask		= s5p_irq_vic_eint_mask,
-	.unmask		= s5p_irq_vic_eint_unmask,
-	.mask_ack	= s5p_irq_vic_eint_maskack,
-	.ack		= s5p_irq_vic_eint_ack,
-	.set_type	= s5p_irq_eint_set_type,
+	.irq_mask	= s5p_irq_vic_eint_mask,
+	.irq_unmask	= s5p_irq_vic_eint_unmask,
+	.irq_mask_ack	= s5p_irq_vic_eint_maskack,
+	.irq_ack	= s5p_irq_vic_eint_ack,
+	.irq_set_type	= s5p_irq_eint_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= s3c_irqext_wake,
+	.irq_set_wake	= s3c_irqext_wake,
 #endif
 };
 
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 0e5dc8c..3b6bf89 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -30,9 +30,9 @@
 
 static struct s3c_gpio_chip *irq_chips[S5P_GPIOINT_GROUP_MAXNR];
 
-static int s5p_gpioint_get_group(unsigned int irq)
+static int s5p_gpioint_get_group(struct irq_data *data)
 {
-	struct gpio_chip *chip = get_irq_data(irq);
+	struct gpio_chip *chip = irq_data_get_irq_data(data);
 	struct s3c_gpio_chip *s3c_chip = container_of(chip,
 			struct s3c_gpio_chip, chip);
 	int group;
@@ -44,22 +44,22 @@
 	return group;
 }
 
-static int s5p_gpioint_get_offset(unsigned int irq)
+static int s5p_gpioint_get_offset(struct irq_data *data)
 {
-	struct gpio_chip *chip = get_irq_data(irq);
+	struct gpio_chip *chip = irq_data_get_irq_data(data);
 	struct s3c_gpio_chip *s3c_chip = container_of(chip,
 			struct s3c_gpio_chip, chip);
 
-	return irq - s3c_chip->irq_base;
+	return data->irq - s3c_chip->irq_base;
 }
 
-static void s5p_gpioint_ack(unsigned int irq)
+static void s5p_gpioint_ack(struct irq_data *data)
 {
 	int group, offset, pend_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	pend_offset = group << 2;
 
 	value = __raw_readl(S5P_GPIOREG(GPIOINT_PEND_OFFSET) + pend_offset);
@@ -67,13 +67,13 @@
 	__raw_writel(value, S5P_GPIOREG(GPIOINT_PEND_OFFSET) + pend_offset);
 }
 
-static void s5p_gpioint_mask(unsigned int irq)
+static void s5p_gpioint_mask(struct irq_data *data)
 {
 	int group, offset, mask_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	mask_offset = group << 2;
 
 	value = __raw_readl(S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
@@ -81,13 +81,13 @@
 	__raw_writel(value, S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
 }
 
-static void s5p_gpioint_unmask(unsigned int irq)
+static void s5p_gpioint_unmask(struct irq_data *data)
 {
 	int group, offset, mask_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	mask_offset = group << 2;
 
 	value = __raw_readl(S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
@@ -95,19 +95,19 @@
 	__raw_writel(value, S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
 }
 
-static void s5p_gpioint_mask_ack(unsigned int irq)
+static void s5p_gpioint_mask_ack(struct irq_data *data)
 {
-	s5p_gpioint_mask(irq);
-	s5p_gpioint_ack(irq);
+	s5p_gpioint_mask(data);
+	s5p_gpioint_ack(data);
 }
 
-static int s5p_gpioint_set_type(unsigned int irq, unsigned int type)
+static int s5p_gpioint_set_type(struct irq_data *data, unsigned int type)
 {
 	int group, offset, con_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	con_offset = group << 2;
 
 	switch (type) {
@@ -142,11 +142,11 @@
 
 struct irq_chip s5p_gpioint = {
 	.name		= "s5p_gpioint",
-	.ack		= s5p_gpioint_ack,
-	.mask		= s5p_gpioint_mask,
-	.mask_ack	= s5p_gpioint_mask_ack,
-	.unmask		= s5p_gpioint_unmask,
-	.set_type	= s5p_gpioint_set_type,
+	.irq_ack	= s5p_gpioint_ack,
+	.irq_mask	= s5p_gpioint_mask,
+	.irq_mask_ack	= s5p_gpioint_mask_ack,
+	.irq_unmask	= s5p_gpioint_unmask,
+	.irq_set_type	= s5p_gpioint_set_type,
 };
 
 static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c
index dc33b9e..5259ad4 100644
--- a/arch/arm/plat-s5p/irq-pm.c
+++ b/arch/arm/plat-s5p/irq-pm.c
@@ -37,14 +37,14 @@
 unsigned long s3c_irqwake_intallow	= 0x00000006L;
 unsigned long s3c_irqwake_eintallow	= 0xffffffffL;
 
-int s3c_irq_wake(unsigned int irqno, unsigned int state)
+int s3c_irq_wake(struct irq_data *data, unsigned int state)
 {
 	unsigned long irqbit;
 
-	switch (irqno) {
+	switch (data->irq) {
 	case IRQ_RTC_TIC:
 	case IRQ_RTC_ALARM:
-		irqbit = 1 << (irqno + 1 - IRQ_RTC_ALARM);
+		irqbit = 1 << (data->irq + 1 - IRQ_RTC_ALARM);
 		if (!state)
 			s3c_irqwake_intmask |= irqbit;
 		else
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
new file mode 100644
index 0000000..d804914
--- /dev/null
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -0,0 +1,328 @@
+/* linux/arch/arm/plat-s5p/sysmmu.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <mach/map.h>
+#include <mach/regs-sysmmu.h>
+#include <mach/sysmmu.h>
+
+#include <plat/sysmmu.h>
+
+struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM];
+
+void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp)
+{
+	unsigned int reg_mmu_ctrl;
+	unsigned int reg_mmu_status;
+	unsigned int reg_pt_base_addr;
+	unsigned int reg_int_status;
+	unsigned int reg_page_ft_addr;
+
+	reg_int_status = __raw_readl(sysmmuconp->regs + S5P_INT_STATUS);
+	reg_mmu_ctrl = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg_mmu_status = __raw_readl(sysmmuconp->regs + S5P_MMU_STATUS);
+	reg_pt_base_addr = __raw_readl(sysmmuconp->regs + S5P_PT_BASE_ADDR);
+	reg_page_ft_addr = __raw_readl(sysmmuconp->regs + S5P_PAGE_FAULT_ADDR);
+
+	printk(KERN_INFO "%s: ips:%s\n", __func__, sysmmuconp->name);
+	printk(KERN_INFO "%s: MMU_CTRL:0x%X, ", __func__, reg_mmu_ctrl);
+	printk(KERN_INFO "MMU_STATUS:0x%X, PT_BASE_ADDR:0x%X\n", reg_mmu_status, reg_pt_base_addr);
+	printk(KERN_INFO "%s: INT_STATUS:0x%X, PAGE_FAULT_ADDR:0x%X\n", __func__, reg_int_status, reg_page_ft_addr);
+
+	switch (reg_int_status & 0xFF) {
+	case 0x1:
+		printk(KERN_INFO "%s: Page fault\n", __func__);
+		printk(KERN_INFO "%s: Virtual address causing last page fault or bus error : 0x%x\n", __func__ , reg_page_ft_addr);
+		break;
+	case 0x2:
+		printk(KERN_INFO "%s: AR multi-hit fault\n", __func__);
+		break;
+	case 0x4:
+		printk(KERN_INFO "%s: AW multi-hit fault\n", __func__);
+		break;
+	case 0x8:
+		printk(KERN_INFO "%s: Bus error\n", __func__);
+		break;
+	case 0x10:
+		printk(KERN_INFO "%s: AR Security protection fault\n", __func__);
+		break;
+	case 0x20:
+		printk(KERN_INFO "%s: AR Access protection fault\n", __func__);
+		break;
+	case 0x40:
+		printk(KERN_INFO "%s: AW Security protection fault\n", __func__);
+		break;
+	case 0x80:
+		printk(KERN_INFO "%s: AW Access protection fault\n", __func__);
+		break;
+	}
+}
+
+static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
+{
+	unsigned int i;
+	unsigned int reg_int_status;
+	struct sysmmu_controller *sysmmuconp;
+
+	for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
+		sysmmuconp = &s5p_sysmmu_cntlrs[i];
+
+		if (sysmmuconp->enable == true) {
+			reg_int_status = __raw_readl(sysmmuconp->regs + S5P_INT_STATUS);
+
+			if (reg_int_status & 0xFF)
+				s5p_sysmmu_register(sysmmuconp);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+int s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
+{
+	struct sysmmu_controller *sysmmuconp = NULL;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	/* Set sysmmu page table base address */
+	__raw_writel(pgd, sysmmuconp->regs + S5P_PT_BASE_ADDR);
+
+	if (s5p_sysmmu_tlb_invalidate(ips) != 0)
+		printk(KERN_ERR "failed s5p_sysmmu_tlb_invalidate\n");
+
+	return 0;
+}
+
+static int s5p_sysmmu_set_tablebase(sysmmu_ips ips)
+{
+	unsigned int pg;
+	struct sysmmu_controller *sysmmuconp;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	__asm__("mrc	p15, 0, %0, c2, c0, 0"	\
+		: "=r" (pg) : : "cc");		\
+		pg &= ~0x3fff;
+
+	sysmmu_debug("CP15 TTBR0 : 0x%x\n", pg);
+
+	/* Set sysmmu page table base address */
+	__raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR);
+
+	return 0;
+}
+
+int s5p_sysmmu_enable(sysmmu_ips ips)
+{
+	unsigned int reg;
+
+	struct sysmmu_controller *sysmmuconp;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	s5p_sysmmu_set_tablebase(ips);
+
+	/* replacement policy : LRU */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CFG);
+	reg |= 0x1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CFG);
+
+	/* Enable interrupt, Enable MMU */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg |= (0x1 << 2) | (0x1 << 0);
+
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	sysmmuconp->enable = true;
+
+	return 0;
+}
+
+int s5p_sysmmu_disable(sysmmu_ips ips)
+{
+	unsigned int reg;
+
+	struct sysmmu_controller *sysmmuconp = NULL;
+
+	if (ips >= S5P_SYSMMU_TOTAL_IPNUM) {
+		printk(KERN_ERR "failed to get ips parameter\n");
+		return 1;
+	}
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CFG);
+
+	/* replacement policy : LRU */
+	reg |= 0x1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CFG);
+
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+
+	/* Disable MMU */
+	reg &= ~0x1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	sysmmuconp->enable = false;
+
+	return 0;
+}
+
+int s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
+{
+	unsigned int reg;
+	struct sysmmu_controller *sysmmuconp = NULL;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	/* set Block MMU for flush TLB */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg |= 0x1 << 1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	/* flush all TLB entry */
+	__raw_writel(0x1, sysmmuconp->regs + S5P_MMU_FLUSH);
+
+	/* set Un-block MMU after flush TLB */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg &= ~(0x1 << 1);
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	return 0;
+}
+
+static int s5p_sysmmu_probe(struct platform_device *pdev)
+{
+	int i;
+	int ret;
+	struct resource *res;
+	struct sysmmu_controller *sysmmuconp;
+	sysmmu_ips ips;
+
+	for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
+		sysmmuconp = &s5p_sysmmu_cntlrs[i];
+		if (sysmmuconp == NULL) {
+			printk(KERN_ERR "failed to get ip's sysmmu info\n");
+			ret = -ENOENT;
+			goto err_res;
+		}
+
+		sysmmuconp->name = sysmmu_ips_name[i];
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			printk(KERN_ERR "failed to get sysmmu resource\n");
+			ret = -ENODEV;
+			goto err_res;
+		}
+
+		sysmmuconp->mem = request_mem_region(res->start,
+				((res->end) - (res->start)) + 1, pdev->name);
+		if (!sysmmuconp->mem) {
+			pr_err("failed to request sysmmu memory region\n");
+			ret = -EBUSY;
+			goto err_res;
+		}
+
+		sysmmuconp->regs = ioremap(res->start, res->end - res->start + 1);
+		if (!sysmmuconp->regs) {
+			pr_err("failed to sysmmu ioremap\n");
+			ret = -ENXIO;
+			goto err_reg;
+		}
+
+		sysmmuconp->irq = platform_get_irq(pdev, i);
+		if (sysmmuconp->irq <= 0) {
+			pr_err("failed to get sysmmu irq resource\n");
+			ret = -ENOENT;
+			goto err_map;
+		}
+
+		ret = request_irq(sysmmuconp->irq, s5p_sysmmu_irq, IRQF_DISABLED, pdev->name, sysmmuconp);
+		if (ret) {
+			pr_err("failed to request irq\n");
+			ret = -ENOENT;
+			goto err_map;
+		}
+
+		ips = (sysmmu_ips)i;
+
+		sysmmuconp->ips = ips;
+	}
+
+	return 0;
+
+err_map:
+	iounmap(sysmmuconp->regs);
+err_reg:
+	release_mem_region(res->start, resource_size(res));
+err_res:
+	return ret;
+}
+
+static int s5p_sysmmu_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+int s5p_sysmmu_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+int s5p_sysmmu_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+const struct dev_pm_ops s5p_sysmmu_pm_ops = {
+	.runtime_suspend	= s5p_sysmmu_runtime_suspend,
+	.runtime_resume		= s5p_sysmmu_runtime_resume,
+};
+
+static struct platform_driver s5p_sysmmu_driver = {
+	.probe		= s5p_sysmmu_probe,
+	.remove		= s5p_sysmmu_remove,
+	.driver		= {
+		.owner		= THIS_MODULE,
+		.name		= "s5p-sysmmu",
+		.pm		= &s5p_sysmmu_pm_ops,
+	}
+};
+
+static int __init s5p_sysmmu_init(void)
+{
+	return platform_driver_register(&s5p_sysmmu_driver);
+}
+arch_initcall(s5p_sysmmu_init);
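
The exported entry points are meant to be driven by the IP's owner: enable
the instance, then point it at a private first-level page table. A hedged
sketch; SYSMMU_MFC_L is an assumed sysmmu_ips enumerator, since the enum
itself lives in the machine's <mach/sysmmu.h> rather than in this file:

	static int my_attach_sysmmu(unsigned long pgd_phys)
	{
		if (s5p_sysmmu_enable(SYSMMU_MFC_L))	/* assumed enum value */
			return -EIO;

		/* installs the table and flushes the TLB internally */
		if (s5p_sysmmu_set_tablebase_pgd(SYSMMU_MFC_L, pgd_phys))
			return -EIO;

		return 0;
	}
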
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index dcd6eff4e..32be05c 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -95,6 +95,12 @@
 	help
 	  Internal configuration to enable the correct GPIO pull helper
 
+config S3C_GPIO_PULL_S3C2443
+	bool
+	select S3C_GPIO_PULL_UPDOWN
+	help
+	  Internal configuration to enable the correct GPIO pull helper for S3C2443-style GPIO
+
 config S3C_GPIO_PULL_DOWN
 	bool
 	help
@@ -333,4 +339,12 @@
 	  and above. This code allows a set of interrupt-to-wakeup-mask
 	  mappings. See <plat/wakeup-mask.h>
 
+comment "Power Domain"
+
+config SAMSUNG_PD
+	bool "Samsung Power Domain"
+	depends on PM_RUNTIME
+	help
+	  Say Y here if you want power domains to be controlled via runtime PM.
+
 endif
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 19d8a16..29932f8 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -74,6 +74,10 @@
 
 obj-$(CONFIG_SAMSUNG_WAKEMASK)	+= wakeup-mask.o
 
+# PD support
+
+obj-$(CONFIG_SAMSUNG_PD)	+= pd.o
+
 # PWM support
 
 obj-$(CONFIG_HAVE_PWM)		+= pwm.o
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index e8d20b0..7728928 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -39,6 +39,9 @@
 #include <linux/clk.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
+#if defined(CONFIG_DEBUG_FS)
+#include <linux/debugfs.h>
+#endif
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
@@ -447,3 +450,92 @@
 	return 0;
 }
 
+#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
+/* debugfs support to trace clock tree hierarchy and attributes */
+
+static struct dentry *clk_debugfs_root;
+
+static int clk_debugfs_register_one(struct clk *c)
+{
+	int err;
+	struct dentry *d, *child, *child_tmp;
+	struct clk *pa = c->parent;
+	char s[255];
+	char *p = s;
+
+	p += sprintf(p, "%s", c->name);
+
+	if (c->id >= 0)
+		sprintf(p, ":%d", c->id);
+
+	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
+	if (!d)
+		return -ENOMEM;
+
+	c->dent = d;
+
+	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usage);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	return 0;
+
+err_out:
+	d = c->dent;
+	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
+		debugfs_remove(child);
+	debugfs_remove(c->dent);
+	return err;
+}
+
+static int clk_debugfs_register(struct clk *c)
+{
+	int err;
+	struct clk *pa = c->parent;
+
+	if (pa && !pa->dent) {
+		err = clk_debugfs_register(pa);
+		if (err)
+			return err;
+	}
+
+	if (!c->dent) {
+		err = clk_debugfs_register_one(c);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int __init clk_debugfs_init(void)
+{
+	struct clk *c;
+	struct dentry *d;
+	int err;
+
+	d = debugfs_create_dir("clock", NULL);
+	if (!d)
+		return -ENOMEM;
+	clk_debugfs_root = d;
+
+	list_for_each_entry(c, &clocks, list) {
+		err = clk_debugfs_register(c);
+		if (err)
+			goto err_out;
+	}
+	return 0;
+
+err_out:
+	debugfs_remove_recursive(clk_debugfs_root);
+	return err;
+}
+late_initcall(clk_debugfs_init);
+
+#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
diff --git a/arch/arm/plat-samsung/dev-nand.c b/arch/arm/plat-samsung/dev-nand.c
index 3a7b889..6927ae8 100644
--- a/arch/arm/plat-samsung/dev-nand.c
+++ b/arch/arm/plat-samsung/dev-nand.c
@@ -126,5 +126,3 @@
 
 	s3c_device_nand.dev.platform_data = npd;
 }
-
-EXPORT_SYMBOL_GPL(s3c_nand_set_platdata);
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 0aa32f2..1c0b040 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -278,6 +278,48 @@
 	pup &= 0x3;
 	return (__force s3c_gpio_pull_t)pup;
 }
+
+#ifdef CONFIG_S3C_GPIO_PULL_S3C2443
+int s3c_gpio_setpull_s3c2443(struct s3c_gpio_chip *chip,
+				unsigned int off, s3c_gpio_pull_t pull)
+{
+	switch (pull) {
+	case S3C_GPIO_PULL_NONE:
+		pull = 0x01;
+		break;
+	case S3C_GPIO_PULL_UP:
+		pull = 0x00;
+		break;
+	case S3C_GPIO_PULL_DOWN:
+		pull = 0x02;
+		break;
+	}
+	return s3c_gpio_setpull_updown(chip, off, pull);
+}
+
+s3c_gpio_pull_t s3c_gpio_getpull_s3c2443(struct s3c_gpio_chip *chip,
+					unsigned int off)
+{
+	s3c_gpio_pull_t pull;
+
+	pull = s3c_gpio_getpull_updown(chip, off);
+
+	switch (pull) {
+	case 0x00:
+		pull = S3C_GPIO_PULL_UP;
+		break;
+	case 0x01:
+	case 0x03:
+		pull = S3C_GPIO_PULL_NONE;
+		break;
+	case 0x02:
+		pull = S3C_GPIO_PULL_DOWN;
+		break;
+	}
+
+	return pull;
+}
+#endif
 #endif
 
 #if defined(CONFIG_S3C_GPIO_PULL_UP) || defined(CONFIG_S3C_GPIO_PULL_DOWN)
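
The new helpers slot into the per-bank configuration through the same
set_pull/get_pull hooks the updown helpers use. A minimal sketch of the
hookup; the cfg name and the set_config choice are illustrative:

	static struct s3c_gpio_cfg s3c2443_gpio_cfg = {
		.set_config	= s3c_gpio_setcfg_s3c24xx,	/* existing helper */
		.set_pull	= s3c_gpio_setpull_s3c2443,
		.get_pull	= s3c_gpio_getpull_s3c2443,
	};
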
diff --git a/arch/arm/plat-samsung/gpiolib.c b/arch/arm/plat-samsung/gpiolib.c
index c354089..ea37c04 100644
--- a/arch/arm/plat-samsung/gpiolib.c
+++ b/arch/arm/plat-samsung/gpiolib.c
@@ -197,3 +197,10 @@
 		s3c_gpiolib_add(chip);
 	}
 }
+
+void __init samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
+					   int nr_chips)
+{
+	for (; nr_chips > 0; nr_chips--, chip++)
+		s3c_gpiolib_add(chip);
+}
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index 0fbcd0e..9a82b88 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -47,6 +47,9 @@
 
 	struct clk_ops		*ops;
 	int		    (*enable)(struct clk *, int enable);
+#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
+	struct dentry		*dent;	/* For visible tree hierarchy */
+#endif
 };
 
 /* other clocks which may be registered by board support */
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index e9e3b6e..b4d208b 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -104,6 +104,7 @@
 extern struct platform_device s5pv310_device_i2s1;
 extern struct platform_device s5pv310_device_i2s2;
 extern struct platform_device s5pv310_device_spdif;
+extern struct platform_device s5pv310_device_pd[];
 
 extern struct platform_device s5p6442_device_pcm0;
 extern struct platform_device s5p6442_device_pcm1;
@@ -115,6 +116,8 @@
 extern struct platform_device s5p6440_device_iis;
 
 extern struct platform_device s5p6450_device_iis0;
+extern struct platform_device s5p6450_device_iis1;
+extern struct platform_device s5p6450_device_iis2;
 extern struct platform_device s5p6450_device_pcm0;
 
 extern struct platform_device s5pc100_device_ac97;
@@ -131,6 +134,11 @@
 extern struct platform_device s5p_device_fimc1;
 extern struct platform_device s5p_device_fimc2;
 
+extern struct platform_device s5p_device_mipi_csis0;
+extern struct platform_device s5p_device_mipi_csis1;
+
+extern struct platform_device s5pv310_device_sysmmu;
+
 /* s3c2440 specific devices */
 
 #ifdef CONFIG_CPU_S3C2440
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
index 0d2c570..5603db0 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
@@ -244,7 +244,7 @@
  * This helper function reads the state of the pull-{up,down} resistor for the
  * given GPIO in the same manner as s3c_gpio_setpull_updown.
 */
-extern s3c_gpio_pull_t s3c_gpio_getpull_s3c24xx(struct s3c_gpio_chip *chip,
+extern s3c_gpio_pull_t s3c_gpio_getpull_s3c2443(struct s3c_gpio_chip *chip,
 						unsigned int off);
 
 #endif /* __PLAT_GPIO_CFG_HELPERS_H */
diff --git a/arch/arm/plat-samsung/include/plat/gpio-core.h b/arch/arm/plat-samsung/include/plat/gpio-core.h
index 13a22b8..dac35d0 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-core.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-core.h
@@ -118,6 +118,8 @@
 					   int nr_chips);
 extern void samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
 					    int nr_chips);
+extern void samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
+					   int nr_chips);
 
 extern void samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip);
 extern void samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip);
diff --git a/arch/arm/plat-samsung/include/plat/pd.h b/arch/arm/plat-samsung/include/plat/pd.h
new file mode 100644
index 0000000..5f0ad85
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/pd.h
@@ -0,0 +1,30 @@
+/* linux/arch/arm/plat-samsung/include/plat/pd.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_PLAT_SAMSUNG_PD_H
+#define __ASM_PLAT_SAMSUNG_PD_H __FILE__
+
+struct samsung_pd_info {
+	int (*enable)(struct device *dev);
+	int (*disable)(struct device *dev);
+	void __iomem *base;
+};
+
+enum s5pv310_pd_block {
+	PD_MFC,
+	PD_G3D,
+	PD_LCD0,
+	PD_LCD1,
+	PD_TV,
+	PD_CAM,
+	PD_GPS
+};
+
+#endif /* __ASM_PLAT_SAMSUNG_PD_H */
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 245836d..d9025e3 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -15,6 +15,8 @@
  * management
 */
 
+#include <linux/irq.h>
+
 #ifdef CONFIG_PM
 
 extern __init int s3c_pm_init(void);
@@ -100,7 +102,7 @@
 extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
 
 #ifdef CONFIG_PM
-extern int s3c_irqext_wake(unsigned int irqno, unsigned int state);
+extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
 extern int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state);
 extern int s3c24xx_irq_resume(struct sys_device *dev);
 #else
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 85853f8..5a41a0b 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -107,6 +107,8 @@
 
 /* Helper function availablity */
 
+extern void s3c2416_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
+extern void s3c2416_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
 extern void s3c64xx_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
 extern void s3c64xx_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
 extern void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
@@ -122,6 +124,39 @@
 extern void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
 extern void s5pv310_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
 
+/* S3C2416 SDHCI setup */
+
+#ifdef CONFIG_S3C2416_SETUP_SDHCI
+extern char *s3c2416_hsmmc_clksrcs[4];
+
+extern void s3c2416_setup_sdhci_cfg_card(struct platform_device *dev,
+					   void __iomem *r,
+					   struct mmc_ios *ios,
+					   struct mmc_card *card);
+
+static inline void s3c2416_default_sdhci0(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC
+	s3c_hsmmc0_def_platdata.clocks = s3c2416_hsmmc_clksrcs;
+	s3c_hsmmc0_def_platdata.cfg_gpio = s3c2416_setup_sdhci0_cfg_gpio;
+	s3c_hsmmc0_def_platdata.cfg_card = s3c2416_setup_sdhci_cfg_card;
+#endif /* CONFIG_S3C_DEV_HSMMC */
+}
+
+static inline void s3c2416_default_sdhci1(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC1
+	s3c_hsmmc1_def_platdata.clocks = s3c2416_hsmmc_clksrcs;
+	s3c_hsmmc1_def_platdata.cfg_gpio = s3c2416_setup_sdhci1_cfg_gpio;
+	s3c_hsmmc1_def_platdata.cfg_card = s3c2416_setup_sdhci_cfg_card;
+#endif /* CONFIG_S3C_DEV_HSMMC1 */
+}
+
+#else
+static inline void s3c2416_default_sdhci0(void) { }
+static inline void s3c2416_default_sdhci1(void) { }
+
+#endif /* CONFIG_S3C2416_SETUP_SDHCI */
 /* S3C64XX SDHCI setup */
 
 #ifdef CONFIG_S3C64XX_SETUP_SDHCI
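
As with the S3C64XX variants below, machine init code is expected to call the
default helpers before the HSMMC devices are registered. A sketch, assuming
an SMDK2416-style machine init function:

	static void __init my_machine_init(void)
	{
		/* point hsmmc0/1 platform data at the S3C2416 setup helpers */
		s3c2416_default_sdhci0();
		s3c2416_default_sdhci1();

		/* platform_add_devices(...) follows in real board code */
	}
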
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 4f8c102..4e77035 100644
--- a/arch/arm/plat-samsung/irq-uart.c
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -28,9 +28,9 @@
  * are consecutive when looking up the interrupt in the demux routines.
  */
 
-static inline void __iomem *s3c_irq_uart_base(unsigned int irq)
+static inline void __iomem *s3c_irq_uart_base(struct irq_data *data)
 {
-	struct s3c_uart_irq *uirq = get_irq_chip_data(irq);
+	struct s3c_uart_irq *uirq = irq_data_get_irq_chip_data(data);
 	return uirq->regs;
 }
 
@@ -39,10 +39,10 @@
 	return irq & 3;
 }
 
-static void s3c_irq_uart_mask(unsigned int irq)
+static void s3c_irq_uart_mask(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 	u32 reg;
 
 	reg = __raw_readl(regs + S3C64XX_UINTM);
@@ -50,10 +50,10 @@
 	__raw_writel(reg, regs + S3C64XX_UINTM);
 }
 
-static void s3c_irq_uart_maskack(unsigned int irq)
+static void s3c_irq_uart_maskack(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 	u32 reg;
 
 	reg = __raw_readl(regs + S3C64XX_UINTM);
@@ -62,10 +62,10 @@
 	__raw_writel(1 << bit, regs + S3C64XX_UINTP);
 }
 
-static void s3c_irq_uart_unmask(unsigned int irq)
+static void s3c_irq_uart_unmask(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 	u32 reg;
 
 	reg = __raw_readl(regs + S3C64XX_UINTM);
@@ -73,17 +73,17 @@
 	__raw_writel(reg, regs + S3C64XX_UINTM);
 }
 
-static void s3c_irq_uart_ack(unsigned int irq)
+static void s3c_irq_uart_ack(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 
 	__raw_writel(1 << bit, regs + S3C64XX_UINTP);
 }
 
 static void s3c_irq_demux_uart(unsigned int irq, struct irq_desc *desc)
 {
-	struct s3c_uart_irq *uirq = desc->handler_data;
+	struct s3c_uart_irq *uirq = desc->irq_data.handler_data;
 	u32 pend = __raw_readl(uirq->regs + S3C64XX_UINTP);
 	int base = uirq->base_irq;
 
@@ -99,10 +99,10 @@
 
 static struct irq_chip s3c_irq_uart = {
 	.name		= "s3c-uart",
-	.mask		= s3c_irq_uart_mask,
-	.unmask		= s3c_irq_uart_unmask,
-	.mask_ack	= s3c_irq_uart_maskack,
-	.ack		= s3c_irq_uart_ack,
+	.irq_mask	= s3c_irq_uart_mask,
+	.irq_unmask	= s3c_irq_uart_unmask,
+	.irq_mask_ack	= s3c_irq_uart_maskack,
+	.irq_ack	= s3c_irq_uart_ack,
 };
 
 static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
@@ -124,7 +124,7 @@
 		set_irq_flags(irq, IRQF_VALID);
 	}
 
-	desc->handler_data = uirq;
+	desc->irq_data.handler_data = uirq;
 	set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
 }
 
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index 0270519..dd8692a 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -24,43 +24,46 @@
 
 static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
 {
-	generic_handle_irq((int)desc->handler_data);
+	generic_handle_irq((int)desc->irq_data.handler_data);
 }
 
 /* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
 
-static void s3c_irq_timer_mask(unsigned int irq)
+static void s3c_irq_timer_mask(struct irq_data *data)
 {
 	u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+	u32 mask = (u32)data->chip_data;
 
 	reg &= 0x1f;  /* mask out pending interrupts */
-	reg &= ~(1 << (irq - IRQ_TIMER0));
+	reg &= ~mask;
 	__raw_writel(reg, S3C64XX_TINT_CSTAT);
 }
 
-static void s3c_irq_timer_unmask(unsigned int irq)
+static void s3c_irq_timer_unmask(struct irq_data *data)
 {
 	u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+	u32 mask = (u32)data->chip_data;
 
 	reg &= 0x1f;  /* mask out pending interrupts */
-	reg |= 1 << (irq - IRQ_TIMER0);
+	reg |= mask;
 	__raw_writel(reg, S3C64XX_TINT_CSTAT);
 }
 
-static void s3c_irq_timer_ack(unsigned int irq)
+static void s3c_irq_timer_ack(struct irq_data *data)
 {
 	u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+	u32 mask = (u32)data->chip_data;
 
 	reg &= 0x1f;
-	reg |= (1 << 5) << (irq - IRQ_TIMER0);
+	reg |= mask << 5;
 	__raw_writel(reg, S3C64XX_TINT_CSTAT);
 }
 
 static struct irq_chip s3c_irq_timer = {
 	.name		= "s3c-timer",
-	.mask		= s3c_irq_timer_mask,
-	.unmask		= s3c_irq_timer_unmask,
-	.ack		= s3c_irq_timer_ack,
+	.irq_mask	= s3c_irq_timer_mask,
+	.irq_unmask	= s3c_irq_timer_unmask,
+	.irq_ack	= s3c_irq_timer_ack,
 };
 
 /**
@@ -79,8 +82,9 @@
 	set_irq_chained_handler(parent_irq, s3c_irq_demux_vic_timer);
 
 	set_irq_chip(timer_irq, &s3c_irq_timer);
+	set_irq_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0)));
 	set_irq_handler(timer_irq, handle_level_irq);
 	set_irq_flags(timer_irq, IRQF_VALID);
 
-	desc->handler_data = (void *)timer_irq;
+	desc->irq_data.handler_data = (void *)timer_irq;
 }
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
new file mode 100644
index 0000000..efe1d56
--- /dev/null
+++ b/arch/arm/plat-samsung/pd.c
@@ -0,0 +1,95 @@
+/* linux/arch/arm/plat-samsung/pd.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Samsung Power domain support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#include <plat/pd.h>
+
+static int samsung_pd_probe(struct platform_device *pdev)
+{
+	struct samsung_pd_info *pdata = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+
+	if (!pdata) {
+		dev_err(dev, "no device data specified\n");
+		return -ENOENT;
+	}
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	dev_info(dev, "power domain registered\n");
+	return 0;
+}
+
+static int __devexit samsung_pd_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	pm_runtime_disable(dev);
+	return 0;
+}
+
+static int samsung_pd_runtime_suspend(struct device *dev)
+{
+	struct samsung_pd_info *pdata = dev->platform_data;
+	int ret = 0;
+
+	if (pdata->disable)
+		ret = pdata->disable(dev);
+
+	dev_dbg(dev, "suspended\n");
+	return ret;
+}
+
+static int samsung_pd_runtime_resume(struct device *dev)
+{
+	struct samsung_pd_info *pdata = dev->platform_data;
+	int ret = 0;
+
+	if (pdata->enable)
+		ret = pdata->enable(dev);
+
+	dev_dbg(dev, "resumed\n");
+	return ret;
+}
+
+static const struct dev_pm_ops samsung_pd_pm_ops = {
+	.runtime_suspend	= samsung_pd_runtime_suspend,
+	.runtime_resume		= samsung_pd_runtime_resume,
+};
+
+static struct platform_driver samsung_pd_driver = {
+	.driver		= {
+		.name		= "samsung-pd",
+		.owner		= THIS_MODULE,
+		.pm		= &samsung_pd_pm_ops,
+	},
+	.probe		= samsung_pd_probe,
+	.remove		= __devexit_p(samsung_pd_remove),
+};
+
+static int __init samsung_pd_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&samsung_pd_driver);
+	if (ret)
+		printk(KERN_ERR "%s: failed to add PD driver\n", __func__);
+
+	return ret;
+}
+arch_initcall(samsung_pd_init);
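
The driver deliberately leaves the actual power switching to per-board
callbacks, so a board registers one "samsung-pd" platform device per domain
and supplies the hooks through samsung_pd_info. A hedged sketch; the register
value and base address are placeholders, not real S5PV310 numbers:

	static int board_pd_enable(struct device *dev)
	{
		struct samsung_pd_info *pd = dev->platform_data;

		__raw_writel(0x7, pd->base);	/* placeholder power-up value */
		return 0;
	}

	static struct samsung_pd_info board_pd_lcd0_data = {
		.enable	= board_pd_enable,
		.base	= NULL,			/* ioremapped control register */
	};

	static struct platform_device board_device_pd_lcd0 = {
		.name	= "samsung-pd",
		.id	= PD_LCD0,
		.dev	= {
			.platform_data = &board_pd_lcd0_data,
		},
	};
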
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 5bf3f2f..02d531f 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -136,15 +136,15 @@
 unsigned long s3c_irqwake_intmask	= 0xffffffffL;
 unsigned long s3c_irqwake_eintmask	= 0xffffffffL;
 
-int s3c_irqext_wake(unsigned int irqno, unsigned int state)
+int s3c_irqext_wake(struct irq_data *data, unsigned int state)
 {
-	unsigned long bit = 1L << IRQ_EINT_BIT(irqno);
+	unsigned long bit = 1L << IRQ_EINT_BIT(data->irq);
 
 	if (!(s3c_irqwake_eintallow & bit))
 		return -ENOENT;
 
 	printk(KERN_INFO "wake %s for irq %d\n",
-	       state ? "enabled" : "disabled", irqno);
+	       state ? "enabled" : "disabled", data->irq);
 
 	if (!state)
 		s3c_irqwake_eintmask |= bit;
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
index 2172d69..7818903 100644
--- a/arch/arm/plat-spear/shirq.c
+++ b/arch/arm/plat-spear/shirq.c
@@ -20,10 +20,10 @@
 struct spear_shirq *shirq;
 static DEFINE_SPINLOCK(lock);
 
-static void shirq_irq_mask(unsigned irq)
+static void shirq_irq_mask(struct irq_data *d)
 {
-	struct spear_shirq *shirq = get_irq_chip_data(irq);
-	u32 val, id = irq - shirq->dev_config[0].virq;
+	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+	u32 val, id = d->irq - shirq->dev_config[0].virq;
 	unsigned long flags;
 
 	if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
@@ -39,10 +39,10 @@
 	spin_unlock_irqrestore(&lock, flags);
 }
 
-static void shirq_irq_unmask(unsigned irq)
+static void shirq_irq_unmask(struct irq_data *d)
 {
-	struct spear_shirq *shirq = get_irq_chip_data(irq);
-	u32 val, id = irq - shirq->dev_config[0].virq;
+	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+	u32 val, id = d->irq - shirq->dev_config[0].virq;
 	unsigned long flags;
 
 	if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
@@ -60,9 +60,9 @@
 
 static struct irq_chip shirq_chip = {
 	.name		= "spear_shirq",
-	.ack		= shirq_irq_mask,
-	.mask		= shirq_irq_mask,
-	.unmask		= shirq_irq_unmask,
+	.irq_ack	= shirq_irq_mask,
+	.irq_mask	= shirq_irq_mask,
+	.irq_unmask	= shirq_irq_unmask,
 };
 
 static void shirq_handler(unsigned irq, struct irq_desc *desc)
@@ -70,7 +70,7 @@
 	u32 i, val, mask;
 	struct spear_shirq *shirq = get_irq_data(irq);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	while ((val = readl(shirq->regs.base + shirq->regs.status_reg) &
 				shirq->regs.status_reg_mask)) {
 		for (i = 0; (i < shirq->dev_count) && val; i++) {
@@ -92,7 +92,7 @@
 			writel(mask, shirq->regs.base + shirq->regs.clear_reg);
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 int spear_shirq_register(struct spear_shirq *shirq)
diff --git a/arch/arm/plat-stmp3xxx/irq.c b/arch/arm/plat-stmp3xxx/irq.c
index 20de4e0..aaa1686 100644
--- a/arch/arm/plat-stmp3xxx/irq.c
+++ b/arch/arm/plat-stmp3xxx/irq.c
@@ -34,7 +34,7 @@
 
 	/* Disable all interrupts initially */
 	for (i = 0; i < NR_REAL_IRQS; i++) {
-		chip->mask(i);
+		chip->irq_mask(irq_get_irq_data(i));
 		set_irq_chip(i, chip);
 		set_irq_handler(i, handle_level_irq);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
diff --git a/arch/arm/plat-stmp3xxx/pinmux.c b/arch/arm/plat-stmp3xxx/pinmux.c
index 6d6b1a4..66d5bac 100644
--- a/arch/arm/plat-stmp3xxx/pinmux.c
+++ b/arch/arm/plat-stmp3xxx/pinmux.c
@@ -351,27 +351,27 @@
 }
 EXPORT_SYMBOL(stmp3xxx_release_pin_group);
 
-static int stmp3xxx_irq_to_gpio(int irq,
+static int stmp3xxx_irq_data_to_gpio(struct irq_data *d,
 	struct stmp3xxx_pinmux_bank **bank, unsigned *gpio)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 
 	for (pm = pinmux_banks; pm < pinmux_banks + NR_BANKS; pm++)
-		if (pm->virq <= irq && irq < pm->virq + 32) {
+		if (pm->virq <= d->irq && d->irq < pm->virq + 32) {
 			*bank = pm;
-			*gpio = irq - pm->virq;
+			*gpio = d->irq - pm->virq;
 			return 0;
 		}
 	return -ENOENT;
 }
 
-static int stmp3xxx_set_irqtype(unsigned irq, unsigned type)
+static int stmp3xxx_set_irqtype(struct irq_data *d, unsigned type)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 	int l, p;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
 		l = 0; p = 1; break;
@@ -398,33 +398,33 @@
 	return 0;
 }
 
-static void stmp3xxx_pin_ack_irq(unsigned irq)
+static void stmp3xxx_pin_ack_irq(struct irq_data *d)
 {
 	u32 stat;
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	stat = __raw_readl(pm->irqstat) & (1 << gpio);
 	stmp3xxx_clearl(stat, pm->irqstat);
 }
 
-static void stmp3xxx_pin_mask_irq(unsigned irq)
+static void stmp3xxx_pin_mask_irq(struct irq_data *d)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	stmp3xxx_clearl(1 << gpio, pm->irqen);
 	stmp3xxx_clearl(1 << gpio, pm->pin2irq);
 }
 
-static void stmp3xxx_pin_unmask_irq(unsigned irq)
+static void stmp3xxx_pin_unmask_irq(struct irq_data *d)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	stmp3xxx_setl(1 << gpio, pm->irqen);
 	stmp3xxx_setl(1 << gpio, pm->pin2irq);
 }
@@ -503,10 +503,10 @@
 }
 
 static struct irq_chip gpio_irq_chip = {
-	.ack	= stmp3xxx_pin_ack_irq,
-	.mask	= stmp3xxx_pin_mask_irq,
-	.unmask	= stmp3xxx_pin_unmask_irq,
-	.set_type = stmp3xxx_set_irqtype,
+	.irq_ack	= stmp3xxx_pin_ack_irq,
+	.irq_mask	= stmp3xxx_pin_mask_irq,
+	.irq_unmask	= stmp3xxx_pin_unmask_irq,
+	.irq_set_type	= stmp3xxx_set_irqtype,
 };
 
 int __init stmp3xxx_pinmux_init(int virtual_irq_start)
@@ -533,7 +533,7 @@
 		pm->virq = virtual_irq_start + b * 32;
 
 		for (virq = pm->virq; virq < pm->virq + 32; virq++) {
-			gpio_irq_chip.mask(virq);
+			gpio_irq_chip.irq_mask(irq_get_irq_data(virq));
 			set_irq_chip(virq, &gpio_irq_chip);
 			set_irq_handler(virq, handle_level_irq);
 			set_irq_flags(virq, IRQF_VALID);
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 41b6d31..961a16f 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -189,6 +189,7 @@
 # define pgprot_val(x)	((x).pgprot)
 
 # define __pte(x)	((pte_t) { (x) } )
+# define __pmd(x)	((pmd_t) { (x) } )
 # define __pgprot(x)	((pgprot_t) { (x) } )
 
 #else /* !STRICT_MM_TYPECHECKS */
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 865f37a..6f1f65d 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -10,11 +10,13 @@
  * we simulate an x86-style page table for the linux mm code
  */
 
-#include <linux/mm.h>		/* for vm_area_struct */
 #include <linux/bitops.h>
+#include <linux/spinlock.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
 
+struct vm_area_struct;
+
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory.  For the return value to be meaningful, ADDR must be >=
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 47ae4a7..3ed5ad9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2068,6 +2068,7 @@
 	bool "One Laptop Per Child support"
 	select GPIOLIB
 	select OLPC_OPENFIRMWARE
+	depends on !X86_64 && !X86_PAE
 	---help---
 	  Add support for detecting the unique features of the OLPC
 	  XO hardware.
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 7c9ab59..51ef31a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -313,14 +313,16 @@
 	if (adev->irq == 0)
 		return;
 
+	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
+	/* APB timer irqs are set up as mp_irqs, timer is edge type */
+	__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+
 	if (system_state == SYSTEM_BOOTING) {
-		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
-		irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-		/* APB timer irqs are set up as mp_irqs, timer is edge type */
-		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
 		if (request_irq(adev->irq, apbt_interrupt_handler,
-				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
-				adev->name, adev)) {
+					IRQF_TIMER | IRQF_DISABLED |
+					IRQF_NOBALANCING,
+					adev->name, adev)) {
 			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
 			       adev->num);
 		}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 823f79a..ffe5755 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -464,7 +464,7 @@
 		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
 
 		/* hpet or pmtimer available ? */
-		if (!hpet && !ref1 && !ref2)
+		if (ref1 == ref2)
 			continue;
 
 		/* Check, whether the sampling was disturbed by an SMI */
@@ -935,7 +935,7 @@
 	tsc_stop = tsc_read_refs(&ref_stop, hpet);
 
 	/* hpet or pmtimer available ? */
-	if (!hpet && !ref_start && !ref_stop)
+	if (ref_start == ref_stop)
 		goto out;
 
 	/* Check, whether the sampling was disturbed by an SMI */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7575e55..5e92b61 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -201,6 +201,7 @@
 	offset = address & ~PAGE_MASK;
 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
+EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 
 void make_lowmem_page_readonly(void *vaddr)
 {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8427697..501ffdf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -598,8 +598,8 @@
 	return cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline unsigned
+cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
@@ -625,6 +625,14 @@
 				    low_slice);
 		}
 	}
+	return slice;
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned slice = cfq_scaled_group_slice(cfqd, cfqq);
+
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
 	cfqq->allocated_slice = slice;
@@ -1661,8 +1669,11 @@
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
-	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
-		cfqq->slice_resid = cfqq->slice_end - jiffies;
+	if (timed_out) {
+		if (cfq_cfqq_slice_new(cfqq))
+			cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq);
+		else
+			cfqq->slice_resid = cfqq->slice_end - jiffies;
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 	}
 
@@ -3284,10 +3295,19 @@
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	struct cfq_queue *old_cfqq = cfqd->active_queue;
+
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
 	cfq_slice_expired(cfqd, 1);
 
 	/*
+	 * If the workload type changed, do not save the slice; otherwise
+	 * the preemption will not take effect.
+	 */
+	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+		cfqq->cfqg->saved_workload_slice = 0;
+
+	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
 	 */
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dd0a5b5..9bfb71f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -26,6 +26,8 @@
 
 source "drivers/md/Kconfig"
 
+source "drivers/target/Kconfig"
+
 source "drivers/message/fusion/Kconfig"
 
 source "drivers/firewire/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index ef51324..7eb35f4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -46,6 +46,7 @@
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-$(CONFIG_ATA)		+= ata/
+obj-$(CONFIG_TARGET_CORE)	+= target/
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
 obj-y				+= net/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 9bb69c5..0e4dba0 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -228,8 +228,10 @@
  */
 ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
 ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
+ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
 #define acpi_gbl_gpe_lock	&_acpi_gbl_gpe_lock
 #define acpi_gbl_hardware_lock	&_acpi_gbl_hardware_lock
+#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
 
 /*****************************************************************************
  *
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 8e31bb5..38bba66 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -293,8 +293,6 @@
  *
  ******************************************************************************/
 static u8 acpi_ev_global_lock_pending;
-static spinlock_t _acpi_ev_global_lock_pending_lock;
-#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
 
 static u32 acpi_ev_global_lock_handler(void *context)
 {
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index d9efa49..199528f 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -85,6 +85,7 @@
 
 	spin_lock_init(acpi_gbl_gpe_lock);
 	spin_lock_init(acpi_gbl_hardware_lock);
+	spin_lock_init(acpi_ev_global_lock_pending_lock);
 
 	/* Mutex for _OSI support */
 	status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 4ee58e7..abda378 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -201,14 +201,14 @@
 	int rc = -ENODEV;
 	unsigned int ghes_count = 0;
 
-	if (acpi_disabled)
-		return;
-
 	if (hest_disable) {
 		pr_info(HEST_PFX "Table parsing disabled.\n");
 		return;
 	}
 
+	if (acpi_disabled)
+		goto err;
+
 	status = acpi_get_table(ACPI_SIG_HEST, 0,
 				(struct acpi_table_header **)&hest_tab);
 	if (status == AE_NOT_FOUND) {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d9926af..5eb25eb 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -275,23 +275,19 @@
 int __init acpi_numa_init(void)
 {
 	int ret = 0;
-	int nr_cpu_entries = nr_cpu_ids;
 
-#ifdef CONFIG_X86
 	/*
 	 * Do not limit the entry count to the cpu count from NR_CPUS or
 	 * nr_cpus=; SRAT cpu entries may be ordered differently from those
 	 * in the MADT, so go over all cpu entries in the SRAT to get the
 	 * apicid to node mapping.
 	 */
-	nr_cpu_entries = MAX_LOCAL_APIC;
-#endif
 
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				     acpi_parse_x2apic_affinity, nr_cpu_entries);
+				     acpi_parse_x2apic_affinity, 0);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				     acpi_parse_processor_affinity, nr_cpu_entries);
+				     acpi_parse_processor_affinity, 0);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d976679..8524939 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -633,11 +633,11 @@
 
 static int __init acpi_pci_root_init(void)
 {
+	acpi_hest_init();
+
 	if (acpi_pci_disabled)
 		return 0;
 
-	acpi_hest_init();
-
 	pci_acpi_crs_quirks();
 	if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
 		return -ENODEV;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef13873..1c28816 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,11 +200,16 @@
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Topcliff (Intel EG20T) PCH DMA support"
+	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
-	  Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver can also be used for the OKI SEMICONDUCTOR ML7213 IOH
+	  (Input/Output Hub), which is intended for IVI (In-Vehicle
+	  Infotainment) use. The ML7213 is a companion chip for the Intel
+	  Atom E6xx series and is fully compatible with the Intel EG20T PCH.
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9..297f48b 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59
  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  *
- * The full GNU General Public License is iin this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
  *
  * Documentation: ARM DDI 0196G == PL080
- * Documentation: ARM DDI 0218E	== PL081
+ * Documentation: ARM DDI 0218E == PL081
  *
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
  *
  * The PL080 has 8 channels available for simultaneous use, and the PL081
  * has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
  *
  * ASSUMES default (little) endianness for DMA transfers
  *
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ *  - DMAC flow control: the transfer size defines the number of transfers
+ *    which occur for the current LLI entry, and the DMAC raises TC at the
+ *    end of every LLI entry.  Observed behaviour shows the DMAC listening
+ *    to both the BREQ and SREQ signals (contrary to documented),
+ *    transferring data if either is active.  The LBREQ and LSREQ signals
+ *    are ignored.
+ *
+ *  - Peripheral flow control: the transfer size is ignored (and should be
+ *    zero).  The data is transferred from the current LLI entry, until
+ *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
+ *    will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method.  However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode; these
+ * hardware restrictions therefore prevent them from using scatter DMA.
  *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
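
To make the flow-control description above concrete, here is a hedged sketch
(helper name hypothetical; the PL080_FLOW_* and
PL080_CONFIG_FLOW_CONTROL_SHIFT constants are the ones this driver uses
elsewhere) of how the supported DMAC flow control would be encoded into the
channel config word. The peripheral flow-control encodings are deliberately
never generated:

	static u32 pl08x_dmac_flow_ccfg(enum dma_data_direction dir)
	{
		u32 flow = PL080_FLOW_MEM2MEM;	/* memcpy default */

		if (dir == DMA_TO_DEVICE)
			flow = PL080_FLOW_MEM2PER;
		else if (dir == DMA_FROM_DEVICE)
			flow = PL080_FLOW_PER2MEM;

		return flow << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	}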
@@ -61,50 +77,39 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/dmapool.h>
-#include <linux/amba/bus.h>
 #include <linux/dmaengine.h>
+#include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
 #include <asm/hardware/pl080.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
 
 #define DRIVER_NAME	"pl08xdmac"
 
 /**
- * struct vendor_data - vendor-specific config parameters
- * for PL08x derivates
- * @name: the name of this specific variant
+ * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
- * @dualmaster: whether this version supports dual AHB masters
- * or not.
+ * @dualmaster: whether this version supports dual AHB masters or not.
  */
 struct vendor_data {
-	char *name;
 	u8 channels;
 	bool dualmaster;
 };
 
 /*
  * PL08X private data structures
- * An LLI struct - see pl08x TRM
- * Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info
- * is in cctl
+ * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info is in cctl.  Also note that these
+ * are fixed 32-bit quantities.
  */
-struct lli {
-	dma_addr_t src;
-	dma_addr_t dst;
-	dma_addr_t next;
+struct pl08x_lli {
+	u32 src;
+	u32 dst;
+	u32 lli;
 	u32 cctl;
 };
 
@@ -119,6 +124,8 @@
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to OR into the LLI pointer, selecting the AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
 struct pl08x_driver_data {
@@ -126,11 +133,13 @@
 	struct dma_device memcpy;
 	void __iomem *base;
 	struct amba_device *adev;
-	struct vendor_data *vd;
+	const struct vendor_data *vd;
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
 	int pool_ctr;
+	u8 lli_buses;
+	u8 mem_buses;
 	spinlock_t lock;
 };
 
@@ -152,9 +161,9 @@
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
 #define PL08X_MAX_ALLOCS	0x40
-#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
 #define PL08X_ALIGN		8
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +171,11 @@
 	return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
@@ -177,88 +191,47 @@
 
 /*
  * Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed.  Poke them into the hardware
+ * and start the transfer.
  */
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
-			    struct pl08x_phy_chan *ch)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+	struct pl08x_txd *txd)
 {
-	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=%08x, cdst=%08x, "
-		 "cctl=%08x, clli=%08x, ccfg=%08x\n",
-		ch->id,
-		ch->csrc,
-		ch->cdst,
-		ch->cctl,
-		ch->clli,
-		ch->ccfg);
-
-	writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
-	writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
-	writel(ch->clli, ch->base + PL080_CH_LLI);
-	writel(ch->cctl, ch->base + PL080_CH_CONTROL);
-	writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
-{
-	struct pl08x_channel_data *cd = plchan->cd;
+	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_txd *txd = plchan->at;
-
-	/* Copy the basic control register calculated at transfer config */
-	phychan->csrc = txd->csrc;
-	phychan->cdst = txd->cdst;
-	phychan->clli = txd->clli;
-	phychan->cctl = txd->cctl;
-
-	/* Assign the signal to the proper control registers */
-	phychan->ccfg = cd->ccfg;
-	phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
-	phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
-	/* If it wasn't set from AMBA, ignore it */
-	if (txd->direction == DMA_TO_DEVICE)
-		/* Select signal as destination */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
-	else if (txd->direction == DMA_FROM_DEVICE)
-		/* Select signal as source */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
-	/* Always enable error interrupts */
-	phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
-	/* Always enable terminal interrupts */
-	phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
-				  struct pl08x_phy_chan *ch)
-{
+	struct pl08x_lli *lli = &txd->llis_va[0];
 	u32 val;
 
-	/*
-	 * Do not access config register until channel shows as disabled
-	 */
-	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
-		;
+	plchan->at = txd;
 
-	/*
-	 * Do not access config register until channel shows as inactive
-	 */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	/* Wait for channel inactive */
+	while (pl08x_phy_channel_busy(phychan))
+		cpu_relax();
+
+	dev_vdbg(&pl08x->adev->dev,
+		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+		txd->ccfg);
+
+	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+	writel(lli->lli, phychan->base + PL080_CH_LLI);
+	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+	/* Enable the DMA channel */
+	/* Do not access config register until channel shows as disabled */
+	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+		cpu_relax();
+
+	/* Do not access config register until channel shows as inactive */
+	val = readl(phychan->base + PL080_CH_CONFIG);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(ch->base + PL080_CH_CONFIG);
+		val = readl(phychan->base + PL080_CH_CONFIG);
 
-	writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
 }
 
 /*
@@ -266,10 +239,8 @@
  *
  * Disabling individual channels could lose data.
  *
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
- *
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
@@ -282,7 +253,7 @@
 
 	/* Wait for channel inactive */
 	while (pl08x_phy_channel_busy(ch))
-		;
+		cpu_relax();
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -333,54 +304,56 @@
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
-	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *txd;
 	unsigned long flags;
-	u32 bytes = 0;
+	size_t bytes = 0;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
 	ch = plchan->phychan;
 	txd = plchan->at;
 
 	/*
-	 * Next follow the LLIs to get the number of pending bytes in the
-	 * currently active transaction.
+	 * Follow the LLIs to get the number of remaining
+	 * bytes in the currently active transaction.
 	 */
 	if (ch && txd) {
-		struct lli *llis_va = txd->llis_va;
-		struct lli *llis_bus = (struct lli *) txd->llis_bus;
-		u32 clli = readl(ch->base + PL080_CH_LLI);
+		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
-		/* First get the bytes in the current active LLI */
+		/* First get the remaining bytes in the active transfer */
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
 		if (clli) {
-			int i = 0;
+			struct pl08x_lli *llis_va = txd->llis_va;
+			dma_addr_t llis_bus = txd->llis_bus;
+			int index;
 
-			/* Forward to the LLI pointed to by clli */
-			while ((clli != (u32) &(llis_bus[i])) &&
-			       (i < MAX_NUM_TSFR_LLIS))
-				i++;
+			BUG_ON(clli < llis_bus || clli >= llis_bus +
+				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
 
-			while (clli) {
-				bytes += get_bytes_in_cctl(llis_va[i].cctl);
+			/*
+			 * Locate the next LLI - as this is an array,
+			 * it's simple maths to find.
+			 */
+			index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+
+			for (; index < MAX_NUM_TSFR_LLIS; index++) {
+				bytes += get_bytes_in_cctl(llis_va[index].cctl);
+
 				/*
-				 * A clli of 0x00000000 will terminate the
-				 * LLI list
+				 * An LLI pointer of 0 terminates the LLI list
 				 */
-				clli = llis_va[i].next;
-				i++;
+				if (!llis_va[index].lli)
+					break;
 			}
 		}
 	}
 
 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->desc_list)) {
-		list_for_each_entry(txdi, &plchan->desc_list, node) {
+	if (!list_empty(&plchan->pend_list)) {
+		struct pl08x_txd *txdi;
+		list_for_each_entry(txdi, &plchan->pend_list, node) {
 			bytes += txdi->len;
 		}
-
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
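
The index computation above works because all LLIs for a transfer live in one
contiguous pool allocation, so the CLLI register (with the AHB2 master bit
masked off) is always llis_bus plus a whole number of entry sizes. A worked
example with hypothetical addresses:

	/* llis_bus = 0x10000 and sizeof(struct pl08x_lli) = 16: a CLLI
	 * reading of 0x10030 selects entry (0x10030 - 0x10000) / 16 == 3,
	 * so entries 3 onwards still hold pending bytes.
	 */
	index = (clli - llis_bus) / sizeof(struct pl08x_lli);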
@@ -390,6 +363,10 @@
 
 /*
  * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
  */
 static struct pl08x_phy_chan *
 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +376,6 @@
 	unsigned long flags;
 	int i;
 
-	/*
-	 * Try to locate a physical channel to be used for
-	 * this transfer. If all are taken return NULL and
-	 * the requester will have to cope by using some fallback
-	 * PIO mode or retrying later.
-	 */
 	for (i = 0; i < pl08x->vd->channels; i++) {
 		ch = &pl08x->phy_chans[i];
 
@@ -465,11 +436,11 @@
 }
 
 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
-				  u32 tsize)
+				  size_t tsize)
 {
 	u32 retbits = cctl;
 
-	/* Remove all src, dst and transfersize bits */
+	/* Remove all src, dst and transfer size bits */
 	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +480,87 @@
 	return retbits;
 }
 
+struct pl08x_lli_build_data {
+	struct pl08x_txd *txd;
+	struct pl08x_driver_data *pl08x;
+	struct pl08x_bus_data srcbus;
+	struct pl08x_bus_data dstbus;
+	size_t remainder;
+};
+
 /*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer.  This prefers the
+ * destination bus if both are available; if one bus has a fixed
+ * address, the other will be chosen.
  */
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
-	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
-	struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = src_bus;
-		*sbus = dst_bus;
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-		*mbus = dst_bus;
-		*sbus = src_bus;
+		*mbus = &bd->dstbus;
+		*sbus = &bd->srcbus;
 	} else {
-		if (dst_bus->buswidth == 4) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 4) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
-		} else if (dst_bus->buswidth == 2) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 2) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
+		if (bd->dstbus.buswidth == 4) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 4) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
+		} else if (bd->dstbus.buswidth == 2) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 2) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		} else {
-			/* src_bus->buswidth == 1 */
-			*mbus = dst_bus;
-			*sbus = src_bus;
+			/* bd->srcbus.buswidth == 1 */
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
 		}
 	}
 }
 
 /*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fill in one LLI for a certain transfer descriptor and advance the counter
  */
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
-			    struct pl08x_txd *txd, int num_llis, int len,
-			    u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+	int num_llis, int len, u32 cctl)
 {
-	struct lli *llis_va = txd->llis_va;
-	struct lli *llis_bus = (struct lli *) txd->llis_bus;
+	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
-	llis_va[num_llis].cctl		= cctl;
-	llis_va[num_llis].src		= txd->srcbus.addr;
-	llis_va[num_llis].dst		= txd->dstbus.addr;
-
-	/*
-	 * On versions with dual masters, you can optionally AND on
-	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
-	 * in new LLIs with that controller, but we always try to
-	 * choose AHB1 to point into memory. The idea is to have AHB2
-	 * fixed on the peripheral and AHB1 messing around in the
-	 * memory. So we don't manipulate this bit currently.
-	 */
-
-	llis_va[num_llis].next =
-		(dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+	llis_va[num_llis].cctl = cctl;
+	llis_va[num_llis].src = bd->srcbus.addr;
+	llis_va[num_llis].dst = bd->dstbus.addr;
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	if (bd->pl08x->lli_buses & PL08X_AHB2)
+		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
-		txd->srcbus.addr += len;
+		bd->srcbus.addr += len;
 	if (cctl & PL080_CONTROL_DST_INCR)
-		txd->dstbus.addr += len;
+		bd->dstbus.addr += len;
 
-	*remainder -= len;
+	BUG_ON(bd->remainder < len);
 
-	return num_llis + 1;
+	bd->remainder -= len;
 }
 
 /*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
  */
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
 {
-	u32 boundary;
+	size_t boundary_len = PL08X_BOUNDARY_SIZE -
+			(addr & (PL08X_BOUNDARY_SIZE - 1));
 
-	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
-		<< PL08X_BOUNDARY_SHIFT;
-
-	if (boundary < addr + len)
-		return boundary - addr;
-	else
-		return len;
+	return min(boundary_len, len);
 }
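
A worked example of the rewritten helper, assuming for illustration that
PL08X_BOUNDARY_SIZE is 0x400 (the driver derives it from
PL08X_BOUNDARY_SHIFT):

	/* 0x400 - (0x3f8 & 0x3ff) = 8 bytes left to the boundary */
	pl08x_pre_boundary(0x3f8, 0x100);	/* returns 0x008 */

	/* an aligned address has a whole boundary's worth in front of it */
	pl08x_pre_boundary(0x400, 0x100);	/* returns min(0x400, 0x100) */

This is why the new form works for any value of addr, including addresses
already sitting on a boundary.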
 
 /*
@@ -608,20 +571,13 @@
 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			      struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = txd->cd;
 	struct pl08x_bus_data *mbus, *sbus;
-	u32 remainder;
+	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
 	u32 cctl;
-	int max_bytes_per_lli;
-	int total_bytes = 0;
-	struct lli *llis_va;
-	struct lli *llis_bus;
-
-	if (!txd) {
-		dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
-		return 0;
-	}
+	size_t max_bytes_per_lli;
+	size_t total_bytes = 0;
+	struct pl08x_lli *llis_va;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
 				      &txd->llis_bus);
@@ -632,121 +588,79 @@
 
 	pl08x->pool_ctr++;
 
-	/*
-	 * Initialize bus values for this transfer
-	 * from the passed optimal values
-	 */
-	if (!cd) {
-		dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
-		return 0;
-	}
+	/* Get the default CCTL */
+	cctl = txd->cctl;
 
-	/* Get the default CCTL from the platform data */
-	cctl = cd->cctl;
-
-	/*
-	 * On the PL080 we have two bus masters and we
-	 * should select one for source and one for
-	 * destination. We try to use AHB2 for the
-	 * bus which does not increment (typically the
-	 * peripheral) else we just choose something.
-	 */
-	cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
-	if (pl08x->vd->dualmaster) {
-		if (cctl & PL080_CONTROL_SRC_INCR)
-			/* Source increments, use AHB2 for destination */
-			cctl |= PL080_CONTROL_DST_AHB2;
-		else if (cctl & PL080_CONTROL_DST_INCR)
-			/* Destination increments, use AHB2 for source */
-			cctl |= PL080_CONTROL_SRC_AHB2;
-		else
-			/* Just pick something, source AHB1 dest AHB2 */
-			cctl |= PL080_CONTROL_DST_AHB2;
-	}
+	bd.txd = txd;
+	bd.pl08x = pl08x;
+	bd.srcbus.addr = txd->src_addr;
+	bd.dstbus.addr = txd->dst_addr;
 
 	/* Find maximum width of the source bus */
-	txd->srcbus.maxwidth =
+	bd.srcbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
 				       PL080_CONTROL_SWIDTH_SHIFT);
 
 	/* Find maximum width of the destination bus */
-	txd->dstbus.maxwidth =
+	bd.dstbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 				       PL080_CONTROL_DWIDTH_SHIFT);
 
 	/* Set up the bus widths to the maximum */
-	txd->srcbus.buswidth = txd->srcbus.maxwidth;
-	txd->dstbus.buswidth = txd->dstbus.maxwidth;
+	bd.srcbus.buswidth = bd.srcbus.maxwidth;
+	bd.dstbus.buswidth = bd.dstbus.maxwidth;
 	dev_vdbg(&pl08x->adev->dev,
 		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
-		 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
 
 
 	/*
 	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
 	 */
-	max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
 		PL080_CONTROL_TRANSFER_SIZE_MASK;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s max bytes per lli = %d\n",
+		 "%s max bytes per lli = %zu\n",
 		 __func__, max_bytes_per_lli);
 
 	/* We need to count this down to zero */
-	remainder = txd->len;
+	bd.remainder = txd->len;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s remainder = %d\n",
-		 __func__, remainder);
+		 "%s remainder = %zu\n",
+		 __func__, bd.remainder);
 
 	/*
 	 * Choose bus to align to
 	 * - prefers destination bus if both available
 	 * - if fixed address on one bus chooses other
-	 * - modifies cctl to choose an apropriate master
 	 */
-	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
-				&mbus, &sbus, cctl);
-
-
-	/*
-	 * The lowest bit of the LLI register
-	 * is also used to indicate which master to
-	 * use for reading the LLIs.
-	 */
+	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
 	if (txd->len < mbus->buswidth) {
-		/*
-		 * Less than a bus width available
-		 * - send as single bytes
-		 */
-		while (remainder) {
+		/* Less than a bus width available - send as single bytes */
+		while (bd.remainder) {
 			dev_vdbg(&pl08x->adev->dev,
 				 "%s single byte LLIs for a transfer of "
-				 "less than a bus width (remain %08x)\n",
-				 __func__, remainder);
+				 "less than a bus width (remain 0x%08x)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis =
-				pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
-					cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	} else {
-		/*
-		 *  Make one byte LLIs until master bus is aligned
-		 *  - slave will then be aligned also
-		 */
+		/* Make one byte LLIs until master bus is aligned */
 		while ((mbus->addr) % (mbus->buswidth)) {
 			dev_vdbg(&pl08x->adev->dev,
 				"%s adjustment lli for less than bus width "
-				 "(remain %08x)\n",
-				 __func__, remainder);
+				 "(remain 0x%08x)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis = pl08x_fill_lli_for_desc
-				(pl08x, txd, num_llis, 1, cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 
 		/*
-		 *  Master now aligned
+		 * Master now aligned
 		 * - if slave is not then we must set its width down
 		 */
 		if (sbus->addr % sbus->buswidth) {
@@ -761,63 +675,51 @@
 		 * Make largest possible LLIs until less than one bus
 		 * width left
 		 */
-		while (remainder > (mbus->buswidth - 1)) {
-			int lli_len, target_len;
-			int tsize;
-			int odd_bytes;
+		while (bd.remainder > (mbus->buswidth - 1)) {
+			size_t lli_len, target_len, tsize, odd_bytes;
 
 			/*
 			 * If enough left try to send max possible,
 			 * otherwise try to send the remainder
 			 */
-			target_len = remainder;
-			if (remainder > max_bytes_per_lli)
-				target_len = max_bytes_per_lli;
+			target_len = min(bd.remainder, max_bytes_per_lli);
 
 			/*
-			 * Set bus lengths for incrementing busses
-			 * to number of bytes which fill to next memory
-			 * boundary
+			 * Set bus lengths for incrementing buses to the
+			 * number of bytes which fill to next memory boundary,
+			 * limiting on the target length calculated above.
 			 */
 			if (cctl & PL080_CONTROL_SRC_INCR)
-				txd->srcbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->srcbus.addr,
-						remainder);
+				bd.srcbus.fill_bytes =
+					pl08x_pre_boundary(bd.srcbus.addr,
+						target_len);
 			else
-				txd->srcbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.srcbus.fill_bytes = target_len;
 
 			if (cctl & PL080_CONTROL_DST_INCR)
-				txd->dstbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->dstbus.addr,
-						remainder);
+				bd.dstbus.fill_bytes =
+					pl08x_pre_boundary(bd.dstbus.addr,
+						target_len);
 			else
-				txd->dstbus.fill_bytes =
-						max_bytes_per_lli;
+				bd.dstbus.fill_bytes = target_len;
 
-			/*
-			 *  Find the nearest
-			 */
-			lli_len	= min(txd->srcbus.fill_bytes,
-				txd->dstbus.fill_bytes);
+			/* Find the nearest */
+			lli_len	= min(bd.srcbus.fill_bytes,
+				      bd.dstbus.fill_bytes);
 
-			BUG_ON(lli_len > remainder);
+			BUG_ON(lli_len > bd.remainder);
 
 			if (lli_len <= 0) {
 				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %d, <= 0\n",
+					"%s lli_len is %zu, <= 0\n",
 						__func__, lli_len);
 				return 0;
 			}
 
 			if (lli_len == target_len) {
 				/*
-				 * Can send what we wanted
-				 */
-				/*
-				 *  Maintain alignment
+				 * Can send what we wanted.
+				 * Maintain alignment
 				 */
 				lli_len	= (lli_len/mbus->buswidth) *
 							mbus->buswidth;
@@ -825,17 +727,14 @@
 			} else {
 				/*
 				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary
-				 * The next lli will past the boundary
-				 * - however we may be working to a boundary
-				 *   on the slave bus
-				 *   We need to ensure the master stays aligned
+				 * to get to the nearest boundary.  The next
+				 * LLI will pass the boundary.  However, we
+				 * may be working to a boundary on the slave
+				 * bus.  We need to ensure the master stays
+				 * aligned, and that we are working in
+				 * multiples of the bus widths.
 				 */
 				odd_bytes = lli_len % mbus->buswidth;
-				/*
-				 * - and that we are working in multiples
-				 *   of the bus widths
-				 */
 				lli_len -= odd_bytes;
 
 			}
@@ -855,41 +754,38 @@
 
 				if (target_len != lli_len) {
 					dev_vdbg(&pl08x->adev->dev,
-					"%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
 					__func__, target_len, lli_len, txd->len);
 				}
 
 				cctl = pl08x_cctl_bits(cctl,
-						       txd->srcbus.buswidth,
-						       txd->dstbus.buswidth,
+						       bd.srcbus.buswidth,
+						       bd.dstbus.buswidth,
 						       tsize);
 
 				dev_vdbg(&pl08x->adev->dev,
-					"%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
-					__func__, lli_len, remainder);
-				num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
-						num_llis, lli_len, cctl,
-						&remainder);
+					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+					__func__, lli_len, bd.remainder);
+				pl08x_fill_lli_for_desc(&bd, num_llis++,
+					lli_len, cctl);
 				total_bytes += lli_len;
 			}
 
 
 			if (odd_bytes) {
 				/*
-				 * Creep past the boundary,
-				 * maintaining master alignment
+				 * Creep past the boundary, maintaining
+				 * master alignment
 				 */
 				int j;
 				for (j = 0; (j < mbus->buswidth)
-						&& (remainder); j++) {
+						&& (bd.remainder); j++) {
 					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 					dev_vdbg(&pl08x->adev->dev,
-						"%s align with boundardy, single byte (remain %08x)\n",
-						__func__, remainder);
-					num_llis =
-						pl08x_fill_lli_for_desc(pl08x,
-							txd, num_llis, 1,
-							cctl, &remainder);
+						"%s align with boundary, single byte (remain 0x%08zx)\n",
+						__func__, bd.remainder);
+					pl08x_fill_lli_for_desc(&bd,
+						num_llis++, 1, cctl);
 					total_bytes++;
 				}
 			}
@@ -898,25 +794,18 @@
 		/*
 		 * Send any odd bytes
 		 */
-		if (remainder < 0) {
-			dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
-					__func__, remainder);
-			return 0;
-		}
-
-		while (remainder) {
+		while (bd.remainder) {
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundardy, single odd byte (remain %d)\n",
-				__func__, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
-					1, cctl, &remainder);
+				"%s align with boundary, single odd byte (remain %zu)\n",
+				__func__, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	}
 	if (total_bytes != txd->len) {
 		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
 			__func__, total_bytes, txd->len);
 		return 0;
 	}
@@ -927,41 +816,12 @@
 			__func__, (u32) MAX_NUM_TSFR_LLIS);
 		return 0;
 	}
-	/*
-	 * Decide whether this is a loop or a terminated transfer
-	 */
+
 	llis_va = txd->llis_va;
-	llis_bus = (struct lli *) txd->llis_bus;
-
-	if (cd->circular_buffer) {
-		/*
-		 * Loop the circular buffer so that the next element
-		 * points back to the beginning of the LLI.
-		 */
-		llis_va[num_llis - 1].next =
-			(dma_addr_t)((unsigned int)&(llis_bus[0]));
-	} else {
-		/*
-		 * On non-circular buffers, the final LLI terminates
-		 * the LLI.
-		 */
-		llis_va[num_llis - 1].next = 0;
-		/*
-		 * The final LLI element shall also fire an interrupt
-		 */
-		llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
-	}
-
-	/* Now store the channel register values */
-	txd->csrc = llis_va[0].src;
-	txd->cdst = llis_va[0].dst;
-	if (num_llis > 1)
-		txd->clli = llis_va[0].next;
-	else
-		txd->clli = 0;
-
-	txd->cctl = llis_va[0].cctl;
-	/* ccfg will be set at physical channel allocation time */
+	/* The final LLI terminates the list. */
+	llis_va[num_llis - 1].lli = 0;
+	/* The final LLI element shall also fire an interrupt. */
+	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
 
 #ifdef VERBOSE_DEBUG
 	{
@@ -969,13 +829,13 @@
 
 		for (i = 0; i < num_llis; i++) {
 			dev_vdbg(&pl08x->adev->dev,
-				 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
 				 i,
 				 &llis_va[i],
 				 llis_va[i].src,
 				 llis_va[i].dst,
 				 llis_va[i].cctl,
-				 llis_va[i].next
+				 llis_va[i].lli
 				);
 		}
 	}
@@ -988,14 +848,8 @@
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
-	if (!txd)
-		dev_err(&pl08x->adev->dev,
-			"%s no descriptor to free\n",
-			__func__);
-
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va,
-		      txd->llis_bus);
+	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
@@ -1008,13 +862,12 @@
 	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *next;
 
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
-					 next, &plchan->desc_list, node) {
+					 next, &plchan->pend_list, node) {
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
-
 	}
 }
 
@@ -1069,6 +922,12 @@
 			return -EBUSY;
 		}
 		ch->signal = ret;
+
+		/* Assign the flow control signal to this channel */
+		if (txd->direction == DMA_TO_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+		else if (txd->direction == DMA_FROM_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +935,54 @@
 		 ch->signal,
 		 plchan->name);
 
+	plchan->phychan_hold++;
 	plchan->phychan = ch;
 
 	return 0;
 }
 
+static void release_phy_channel(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
+		pl08x->pd->put_signal(plchan);
+		plchan->phychan->signal = -1;
+	}
+	pl08x_put_phy_channel(pl08x, plchan->phychan);
+	plchan->phychan = NULL;
+}
+
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+	struct pl08x_txd *txd = to_pl08x_txd(tx);
+	unsigned long flags;
 
-	atomic_inc(&plchan->last_issued);
-	tx->cookie = atomic_read(&plchan->last_issued);
-	/* This unlock follows the lock in the prep() function */
-	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+	spin_lock_irqsave(&plchan->lock, flags);
+
+	plchan->chan.cookie += 1;
+	if (plchan->chan.cookie < 0)
+		plchan->chan.cookie = 1;
+	tx->cookie = plchan->chan.cookie;
+
+	/* Put this onto the pending list */
+	list_add_tail(&txd->node, &plchan->pend_list);
+
+	/*
+	 * If there was no physical channel available for this memcpy,
+	 * stack the request up and indicate that the channel is waiting
+	 * for a free physical channel.
+	 */
+	if (!plchan->slave && !plchan->phychan) {
+		/* Do this memcpy whenever there is a channel ready */
+		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting = txd;
+	} else {
+		plchan->phychan_hold--;
+	}
+
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return tx->cookie;
 }
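
The cookie handling above follows the dmaengine convention: dma_cookie_t is a
signed 32-bit value in which negative numbers are error codes and 0 means
"not assigned", so on overflow the counter wraps back to 1 rather than to 0
or below. As a standalone sketch (helper name hypothetical):

	static dma_cookie_t pl08x_advance_cookie(dma_cookie_t cookie)
	{
		cookie += 1;
		if (cookie < 0)
			cookie = 1;
		return cookie;
	}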
@@ -1102,10 +996,9 @@
 }
 
 /*
- * Code accessing dma_async_is_complete() in a tight loop
- * may give problems - could schedule where indicated.
- * If slaves are relying on interrupts to signal completion this
- * function must not be called with interrupts disabled
+ * Code accessing dma_async_is_complete() in a tight loop may cause problems.
+ * If slaves are relying on interrupts to signal completion this function
+ * must not be called with interrupts disabled.
  */
 static enum dma_status
 pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1011,7 @@
 	enum dma_status ret;
 	u32 bytesleft = 0;
 
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1021,9 @@
 	}
 
 	/*
-	 * schedule(); could be inserted here
-	 */
-
-	/*
 	 * This cookie not complete yet
 	 */
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	/* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1088,35 @@
 	},
 };
 
-static void dma_set_runtime_config(struct dma_chan *chan,
-			       struct dma_slave_config *config)
+static int dma_set_runtime_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_channel_data *cd = plchan->cd;
 	enum dma_slave_buswidth addr_width;
+	dma_addr_t addr;
 	u32 maxburst;
 	u32 cctl = 0;
-	/* Mask out all except src and dst channel */
-	u32 ccfg = cd->ccfg & 0x000003DEU;
-	int i = 0;
+	int i;
+
+	if (!plchan->slave)
+		return -EINVAL;
 
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
 	if (config->direction == DMA_TO_DEVICE) {
-		plchan->runtime_addr = config->dst_addr;
-		cctl |= PL080_CONTROL_SRC_INCR;
-		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
 	} else if (config->direction == DMA_FROM_DEVICE) {
-		plchan->runtime_addr = config->src_addr;
-		cctl |= PL080_CONTROL_DST_INCR;
-		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien transfer direction\n");
-		return;
+		return -EINVAL;
 	}
 
 	switch (addr_width) {
@@ -1248,42 +1135,40 @@
 	default:
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien address width\n");
-		return;
+		return -EINVAL;
 	}
 
 	/*
 	 * Now decide on a maxburst:
-	 * If this channel will only request single transfers, set
-	 * this down to ONE element.
+	 * If this channel will only request single transfers, set this
+	 * down to ONE element.  Also select one element if no maxburst
+	 * is specified.
 	 */
-	if (plchan->cd->single) {
+	if (plchan->cd->single || maxburst == 0) {
 		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
 			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
 	} else {
-		while (i < ARRAY_SIZE(burst_sizes)) {
+		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
 			if (burst_sizes[i].burstwords <= maxburst)
 				break;
-			i++;
-		}
 		cctl |= burst_sizes[i].reg;
 	}
 
-	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
-	cctl &= ~PL080_CONTROL_PROT_MASK;
-	cctl |= PL080_CONTROL_PROT_SYS;
+	plchan->runtime_addr = addr;
 
 	/* Modify the default channel data to fit PrimeCell request */
 	cd->cctl = cctl;
-	cd->ccfg = ccfg;
 
 	dev_dbg(&pl08x->adev->dev,
 		"configured channel %s (%s) for %s, data width %d, "
-		"maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+		"maxburst %d words, LE, CCTL=0x%08x\n",
 		dma_chan_name(chan), plchan->name,
 		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
 		addr_width,
 		maxburst,
-		cctl, ccfg);
+		cctl);
+
+	return 0;
 }
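
On the maxburst handling above: burst_sizes[] is ordered from the largest
burst downwards, so the loop picks the largest hardware burst that does not
exceed the requested maxburst, and the new maxburst == 0 case avoids walking
off the end of the table. For illustration (burst values assumed from the
table's ordering):

	/* maxburst == 5 words: 256, 128, ..., 8 all exceed the request;
	 * the 4-word entry is the first that fits, so its CCTL encoding
	 * is used.  maxburst == 0 is treated like cd->single, i.e.
	 * PL080_BSIZE_1 on both the source and destination burst fields.
	 */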
 
 /*
@@ -1293,35 +1178,26 @@
 static void pl08x_issue_pending(struct dma_chan *chan)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
 	unsigned long flags;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-	/* Something is already active */
-	if (plchan->at) {
-			spin_unlock_irqrestore(&plchan->lock, flags);
-			return;
+	/* Something is already active, or we're waiting for a channel... */
+	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
+		spin_unlock_irqrestore(&plchan->lock, flags);
+		return;
 	}
 
-	/* Didn't get a physical channel so waiting for it ... */
-	if (plchan->state == PL08X_CHAN_WAITING)
-		return;
-
 	/* Take the first element in the queue and execute it */
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *next;
 
-		next = list_first_entry(&plchan->desc_list,
+		next = list_first_entry(&plchan->pend_list,
 					struct pl08x_txd,
 					node);
 		list_del(&next->node);
-		plchan->at = next;
 		plchan->state = PL08X_CHAN_RUNNING;
 
-		/* Configure the physical channel for the active txd */
-		pl08x_config_phychan_for_txd(plchan);
-		pl08x_set_cregs(pl08x, plchan->phychan);
-		pl08x_enable_phy_chan(pl08x, plchan->phychan);
+		pl08x_start_txd(plchan, next);
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1330,30 +1206,17 @@
 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 					struct pl08x_txd *txd)
 {
-	int num_llis;
 	struct pl08x_driver_data *pl08x = plchan->host;
-	int ret;
+	unsigned long flags;
+	int num_llis, ret;
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-
-	if (!num_llis)
+	if (!num_llis) {
+		kfree(txd);
 		return -EINVAL;
+	}
 
-	spin_lock_irqsave(&plchan->lock, plchan->lockflags);
-
-	/*
-	 * If this device is not using a circular buffer then
-	 * queue this new descriptor for transfer.
-	 * The descriptor for a circular buffer continues
-	 * to be used until the channel is freed.
-	 */
-	if (txd->cd->circular_buffer)
-		dev_err(&pl08x->adev->dev,
-			"%s attempting to queue a circular buffer\n",
-			__func__);
-	else
-		list_add_tail(&txd->node,
-			      &plchan->desc_list);
+	spin_lock_irqsave(&plchan->lock, flags);
 
 	/*
 	 * See if we already have a physical channel allocated,
@@ -1362,45 +1225,74 @@
 	ret = prep_phy_channel(plchan, txd);
 	if (ret) {
 		/*
-		 * No physical channel available, we will
-		 * stack up the memcpy channels until there is a channel
-		 * available to handle it whereas slave transfers may
-		 * have been denied due to platform channel muxing restrictions
-		 * and since there is no guarantee that this will ever be
-		 * resolved, and since the signal must be aquired AFTER
-		 * aquiring the physical channel, we will let them be NACK:ed
-		 * with -EBUSY here. The drivers can alway retry the prep()
-		 * call if they are eager on doing this using DMA.
+		 * No physical channel was available.
+		 *
+		 * memcpy transfers can be sorted out at submission time.
+		 *
+		 * Slave transfers may have been denied due to platform
+		 * channel muxing restrictions.  Since there is no guarantee
+		 * that this will ever be resolved, and the signal must be
+		 * acquired AFTER acquiring the physical channel, we will let
+		 * them be NACKed with -EBUSY here. The drivers can retry
+		 * the prep() call if they are eager to do this using DMA.
 		 */
 		if (plchan->slave) {
 			pl08x_free_txd_list(pl08x, plchan);
-			spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+			pl08x_free_txd(pl08x, txd);
+			spin_unlock_irqrestore(&plchan->lock, flags);
 			return -EBUSY;
 		}
-		/* Do this memcpy whenever there is a channel ready */
-		plchan->state = PL08X_CHAN_WAITING;
-		plchan->waiting = txd;
 	} else
 		/*
-		 * Else we're all set, paused and ready to roll,
-		 * status will switch to PL08X_CHAN_RUNNING when
-		 * we call issue_pending(). If there is something
-		 * running on the channel already we don't change
-		 * its state.
+		 * Else we're all set, paused and ready to roll, status
+		 * will switch to PL08X_CHAN_RUNNING when we call
+		 * issue_pending(). If there is something running on the
+		 * channel already we don't change its state.
 		 */
 		if (plchan->state == PL08X_CHAN_IDLE)
 			plchan->state = PL08X_CHAN_PAUSED;
 
-	/*
-	 * Notice that we leave plchan->lock locked on purpose:
-	 * it will be unlocked in the subsequent tx_submit()
-	 * call. This is a consequence of the current API.
-	 */
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return 0;
 }
 
 /*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port.  We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
+{
+	u32 cctl = 0;
+
+	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+		cctl |= PL080_CONTROL_DST_AHB2;
+	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+		cctl |= PL080_CONTROL_SRC_AHB2;
+
+	return cctl;
+}
+
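
A usage example for pl08x_select_bus() with hypothetical platform masks: a
peripheral reachable only on AHB2, paired with memory visible on both ports,
ends up with source and destination on separate masters:

	u32 cctl = pl08x_select_bus(pl08x,
			PL08X_AHB1 | PL08X_AHB2,	/* src: memory */
			PL08X_AHB2);			/* dst: peripheral */
	/* -> PL080_CONTROL_DST_AHB2 is set; the source stays on AHB1 */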
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
+	unsigned long flags)
+{
+	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+
+	if (txd) {
+		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
+		txd->tx.flags = flags;
+		txd->tx.tx_submit = pl08x_tx_submit;
+		INIT_LIST_HEAD(&txd->node);
+
+		/* Always enable error and terminal interrupts */
+		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
+			    PL080_CONFIG_TC_IRQ_MASK;
+	}
+	return txd;
+}
+
+/*
  * Initialize a descriptor to be used by memcpy submit
  */
 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
@@ -1412,40 +1304,38 @@
 	struct pl08x_txd *txd;
 	int ret;
 
-	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev,
 			"%s no memory for descriptor\n", __func__);
 		return NULL;
 	}
 
-	dma_async_tx_descriptor_init(&txd->tx, chan);
 	txd->direction = DMA_NONE;
-	txd->srcbus.addr = src;
-	txd->dstbus.addr = dest;
-
-	/* Set platform data for m2m */
-	txd->cd = &pl08x->pd->memcpy_channel;
-	/* Both to be incremented or the code will break */
-	txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
-	txd->tx.tx_submit = pl08x_tx_submit;
-	txd->tx.callback = NULL;
-	txd->tx.callback_param = NULL;
+	txd->src_addr = src;
+	txd->dst_addr = dest;
 	txd->len = len;
 
-	INIT_LIST_HEAD(&txd->node);
+	/* Set platform data for m2m */
+	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+	txd->cctl = pl08x->pd->memcpy_channel.cctl &
+			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
+
+	/* Both to be incremented or the code will break */
+	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+
+	if (pl08x->vd->dualmaster)
+		txd->cctl |= pl08x_select_bus(pl08x,
+					pl08x->mem_buses, pl08x->mem_buses);
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
-	/*
-	 * NB: the channel lock is held at this point so tx_submit()
-	 * must be called in direct succession.
-	 */
 
 	return &txd->tx;
 }
 
-struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_data_direction direction,
 		unsigned long flags)
@@ -1453,6 +1343,7 @@
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
+	u8 src_buses, dst_buses;
 	int ret;
 
 	/*
@@ -1467,14 +1358,12 @@
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 		__func__, sgl->length, plchan->name);
 
-	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
 		return NULL;
 	}
 
-	dma_async_tx_descriptor_init(&txd->tx, chan);
-
 	if (direction != plchan->runtime_direction)
 		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
 			"the direction configured for the PrimeCell\n",
@@ -1486,37 +1375,47 @@
 	 * channel target address dynamically at runtime.
 	 */
 	txd->direction = direction;
+	txd->len = sgl->length;
+
+	txd->cctl = plchan->cd->cctl &
+			~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+			  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+			  PL080_CONTROL_PROT_MASK);
+
+	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
+	txd->cctl |= PL080_CONTROL_PROT_SYS;
+
 	if (direction == DMA_TO_DEVICE) {
-		txd->srcbus.addr = sgl->dma_address;
+		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		txd->cctl |= PL080_CONTROL_SRC_INCR;
+		txd->src_addr = sgl->dma_address;
 		if (plchan->runtime_addr)
-			txd->dstbus.addr = plchan->runtime_addr;
+			txd->dst_addr = plchan->runtime_addr;
 		else
-			txd->dstbus.addr = plchan->cd->addr;
+			txd->dst_addr = plchan->cd->addr;
+		src_buses = pl08x->mem_buses;
+		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_FROM_DEVICE) {
+		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		txd->cctl |= PL080_CONTROL_DST_INCR;
 		if (plchan->runtime_addr)
-			txd->srcbus.addr = plchan->runtime_addr;
+			txd->src_addr = plchan->runtime_addr;
 		else
-			txd->srcbus.addr = plchan->cd->addr;
-		txd->dstbus.addr = sgl->dma_address;
+			txd->src_addr = plchan->cd->addr;
+		txd->dst_addr = sgl->dma_address;
+		src_buses = plchan->cd->periph_buses;
+		dst_buses = pl08x->mem_buses;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"%s direction unsupported\n", __func__);
 		return NULL;
 	}
-	txd->cd = plchan->cd;
-	txd->tx.tx_submit = pl08x_tx_submit;
-	txd->tx.callback = NULL;
-	txd->tx.callback_param = NULL;
-	txd->len = sgl->length;
-	INIT_LIST_HEAD(&txd->node);
+
+	txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
 
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
-	/*
-	 * NB: the channel lock is held at this point so tx_submit()
-	 * must be called in direct succession.
-	 */
 
 	return &txd->tx;
 }
@@ -1531,10 +1430,8 @@
 
 	/* Controls applicable to inactive channels */
 	if (cmd == DMA_SLAVE_CONFIG) {
-		dma_set_runtime_config(chan,
-				       (struct dma_slave_config *)
-				       arg);
-		return 0;
+		return dma_set_runtime_config(chan,
+					      (struct dma_slave_config *)arg);
 	}
 
 	/*
@@ -1558,16 +1455,8 @@
 			 * Mark physical channel as free and free any slave
 			 * signal
 			 */
-			if ((plchan->phychan->signal >= 0) &&
-			    pl08x->pd->put_signal) {
-				pl08x->pd->put_signal(plchan);
-				plchan->phychan->signal = -1;
-			}
-			pl08x_put_phy_channel(pl08x, plchan->phychan);
-			plchan->phychan = NULL;
+			release_phy_channel(plchan);
 		}
-		/* Stop any pending tasklet */
-		tasklet_disable(&plchan->tasklet);
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
 			pl08x_free_txd(pl08x, plchan->at);
@@ -1609,10 +1498,9 @@
 
 /*
  * Just check that the device is there and active
- * TODO: turn this bit on/off depending on the number of
- * physical channels actually used, if it is zero... well
- * shut it off. That will save some power. Cut the clock
- * at the same time.
+ * TODO: turn this bit on/off depending on the number of physical channels
+ * actually used, if it is zero... well shut it off. That will save some
+ * power. Cut the clock at the same time.
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
@@ -1620,78 +1508,66 @@
 
 	val = readl(pl08x->base + PL080_CONFIG);
 	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
-	/* We implictly clear bit 1 and that means little-endian mode */
+	/* We implicitly clear bit 1 and that means little-endian mode */
 	val |= PL080_CONFIG_ENABLE;
 	writel(val, pl08x->base + PL080_CONFIG);
 }
 
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+	struct device *dev = txd->tx.chan->device->dev;
+
+	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			dma_unmap_single(dev, txd->src_addr, txd->len,
+				DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, txd->src_addr, txd->len,
+				DMA_TO_DEVICE);
+	}
+	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			dma_unmap_single(dev, txd->dst_addr, txd->len,
+				DMA_FROM_DEVICE);
+		else
+			dma_unmap_page(dev, txd->dst_addr, txd->len,
+				DMA_FROM_DEVICE);
+	}
+}
+
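
The unmap helper above honours the standard dmaengine completion flags, so a
memcpy client that manages its own mappings can opt out when preparing the
descriptor. A hedged client-side sketch:

	struct dma_async_tx_descriptor *txd;

	/* The client keeps ownership of both mappings across completion */
	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
			DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP);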
 static void pl08x_tasklet(unsigned long data)
 {
 	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
-	struct pl08x_phy_chan *phychan = plchan->phychan;
 	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_txd *txd;
+	unsigned long flags;
 
-	if (!plchan)
-		BUG();
+	spin_lock_irqsave(&plchan->lock, flags);
 
-	spin_lock(&plchan->lock);
+	txd = plchan->at;
+	plchan->at = NULL;
 
-	if (plchan->at) {
-		dma_async_tx_callback callback =
-			plchan->at->tx.callback;
-		void *callback_param =
-			plchan->at->tx.callback_param;
-
-		/*
-		 * Update last completed
-		 */
-		plchan->lc =
-			(plchan->at->tx.cookie);
-
-		/*
-		 * Callback to signal completion
-		 */
-		if (callback)
-			callback(callback_param);
-
-		/*
-		 * Device callbacks should NOT clear
-		 * the current transaction on the channel
-		 * Linus: sometimes they should?
-		 */
-		if (!plchan->at)
-			BUG();
-
-		/*
-		 * Free the descriptor if it's not for a device
-		 * using a circular buffer
-		 */
-		if (!plchan->at->cd->circular_buffer) {
-			pl08x_free_txd(pl08x, plchan->at);
-			plchan->at = NULL;
-		}
-		/*
-		 * else descriptor for circular
-		 * buffers only freed when
-		 * client has disabled dma
-		 */
+	if (txd) {
+		/* Update last completed */
+		plchan->lc = txd->tx.cookie;
 	}
-	/*
-	 * If a new descriptor is queued, set it up
-	 * plchan->at is NULL here
-	 */
-	if (!list_empty(&plchan->desc_list)) {
+
+	/* If a new descriptor is queued, set it up; plchan->at is NULL here */
+	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *next;
 
-		next = list_first_entry(&plchan->desc_list,
+		next = list_first_entry(&plchan->pend_list,
 					struct pl08x_txd,
 					node);
 		list_del(&next->node);
-		plchan->at = next;
-		/* Configure the physical channel for the next txd */
-		pl08x_config_phychan_for_txd(plchan);
-		pl08x_set_cregs(pl08x, plchan->phychan);
-		pl08x_enable_phy_chan(pl08x, plchan->phychan);
+
+		pl08x_start_txd(plchan, next);
+	} else if (plchan->phychan_hold) {
+		/*
+		 * This channel is still in use - we have a new txd being
+		 * prepared and will soon be queued.  Don't give up the
+		 * physical channel.
+		 */
 	} else {
 		struct pl08x_dma_chan *waiting = NULL;
 
@@ -1699,20 +1575,14 @@
 		 * No more jobs, so free up the physical channel
 		 * Free any allocated signal on slave transfers too
 		 */
-		if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
-			pl08x->pd->put_signal(plchan);
-			phychan->signal = -1;
-		}
-		pl08x_put_phy_channel(pl08x, phychan);
-		plchan->phychan = NULL;
+		release_phy_channel(plchan);
 		plchan->state = PL08X_CHAN_IDLE;
 
 		/*
-		 * And NOW before anyone else can grab that free:d
-		 * up physical channel, see if there is some memcpy
-		 * pending that seriously needs to start because of
-		 * being stacked up while we were choking the
-		 * physical channels with data.
+		 * And NOW, before anyone else can grab that freed-up
+		 * physical channel, see if there is some memcpy pending
+		 * that seriously needs to start because it was stacked
+		 * up while we were choking the physical channels with data.
 		 */
 		list_for_each_entry(waiting, &pl08x->memcpy.channels,
 				    chan.device_node) {
@@ -1724,6 +1594,7 @@
 				ret = prep_phy_channel(waiting,
 						       waiting->waiting);
 				BUG_ON(ret);
+				waiting->phychan_hold--;
 				waiting->state = PL08X_CHAN_RUNNING;
 				waiting->waiting = NULL;
 				pl08x_issue_pending(&waiting->chan);
@@ -1732,7 +1603,25 @@
 		}
 	}
 
-	spin_unlock(&plchan->lock);
+	spin_unlock_irqrestore(&plchan->lock, flags);
+
+	if (txd) {
+		dma_async_tx_callback callback = txd->tx.callback;
+		void *callback_param = txd->tx.callback_param;
+
+		/* Don't try to unmap buffers on slave channels */
+		if (!plchan->slave)
+			pl08x_unmap_buffers(txd);
+
+		/* Free the descriptor */
+		spin_lock_irqsave(&plchan->lock, flags);
+		pl08x_free_txd(pl08x, txd);
+		spin_unlock_irqrestore(&plchan->lock, flags);
+
+		/* Callback to signal completion */
+		if (callback)
+			callback(callback_param);
+	}
 }
 
 static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1744,9 +1633,7 @@
 
 	val = readl(pl08x->base + PL080_ERR_STATUS);
 	if (val) {
-		/*
-		 * An error interrupt (on one or more channels)
-		 */
+		/* An error interrupt (on one or more channels) */
 		dev_err(&pl08x->adev->dev,
 			"%s error interrupt, register value 0x%08x\n",
 				__func__, val);
@@ -1770,9 +1657,7 @@
 			mask |= (1 << i);
 		}
 	}
-	/*
-	 * Clear only the terminal interrupts on channels we processed
-	 */
+	/* Clear only the terminal interrupts on channels we processed */
 	writel(mask, pl08x->base + PL080_TC_CLEAR);
 
 	return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1791,6 +1676,7 @@
 	int i;
 
 	INIT_LIST_HEAD(&dmadev->channels);
+
 	/*
 	 * Register as many memcpy channels as we have physical channels,
 	 * we won't always be able to use all but the code will have
@@ -1819,16 +1705,23 @@
 				return -ENOMEM;
 			}
 		}
+		if (chan->cd->circular_buffer) {
+			dev_err(&pl08x->adev->dev,
+				"channel %s: circular buffers not supported\n",
+				chan->name);
+			kfree(chan);
+			continue;
+		}
 		dev_info(&pl08x->adev->dev,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
 		chan->chan.device = dmadev;
-		atomic_set(&chan->last_issued, 0);
-		chan->lc = atomic_read(&chan->last_issued);
+		chan->chan.cookie = 0;
+		chan->lc = 0;
 
 		spin_lock_init(&chan->lock);
-		INIT_LIST_HEAD(&chan->desc_list);
+		INIT_LIST_HEAD(&chan->pend_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
 
@@ -1898,7 +1791,7 @@
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
 	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
-		seq_printf(s, "%s\t\t\%s\n", chan->name,
+		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}
 
@@ -1906,7 +1799,7 @@
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
 	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
-		seq_printf(s, "%s\t\t\%s\n", chan->name,
+		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}
 
@@ -1942,7 +1835,7 @@
 static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
 {
 	struct pl08x_driver_data *pl08x;
-	struct vendor_data *vd = id->data;
+	const struct vendor_data *vd = id->data;
 	int ret = 0;
 	int i;
 
@@ -1990,6 +1883,14 @@
 	pl08x->adev = adev;
 	pl08x->vd = vd;
 
+	/* By default, use AHB1 only; if dualmaster, take buses from platform data */
+	pl08x->lli_buses = PL08X_AHB1;
+	pl08x->mem_buses = PL08X_AHB1;
+	if (pl08x->vd->dualmaster) {
+		pl08x->lli_buses = pl08x->pd->lli_buses;
+		pl08x->mem_buses = pl08x->pd->mem_buses;
+	}
+
 	/* A DMA memory pool for LLIs, align on 1-byte boundary */
 	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
 			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
@@ -2009,14 +1910,12 @@
 	/* Turn on the PL08x */
 	pl08x_ensure_on(pl08x);
 
-	/*
-	 * Attach the interrupt handler
-	 */
+	/* Attach the interrupt handler */
 	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
 	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
 
 	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
-			  vd->name, pl08x);
+			  DRIVER_NAME, pl08x);
 	if (ret) {
 		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
 			__func__, adev->irq[0]);
@@ -2087,8 +1986,9 @@
 
 	amba_set_drvdata(adev, pl08x);
 	init_pl08x_debugfs(pl08x);
-	dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
-		vd->name, adev->res.start);
+	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
+		 amba_part(adev), amba_rev(adev),
+		 (unsigned long long)adev->res.start, adev->irq[0]);
 	return 0;
 
 out_no_slave_reg:
@@ -2115,13 +2015,11 @@
 
 /* The PL080 has 8 channels and the PL081 just 2 */
 static struct vendor_data vendor_pl080 = {
-	.name = "PL080",
 	.channels = 8,
 	.dualmaster = true,
 };
 
 static struct vendor_data vendor_pl081 = {
-	.name = "PL081",
 	.channels = 2,
 	.dualmaster = false,
 };
@@ -2160,7 +2058,7 @@
 	retval = amba_driver_register(&pl08x_amba_driver);
 	if (retval)
 		printk(KERN_WARNING DRIVER_NAME
-		       "failed to register as an amba device (%d)\n",
+		       "failed to register as an AMBA device (%d)\n",
 		       retval);
 	return retval;
 }
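
The restructured tasklet above also moves the completion callback outside
plchan->lock, so a client callback may safely submit new work. All
post-transfer cleanup now goes through pl08x_unmap_buffers(), which honours
the generic dmaengine completion flags. A minimal sketch of a memcpy client
that manages its own mappings and opts out of the automatic unmap (chan,
dst, src and len are placeholders; the prep callback and flag names are the
generic dmaengine ones):

	/*
	 * Hypothetical client code: with both SKIP flags set,
	 * pl08x_unmap_buffers() does nothing and the client stays
	 * responsible for calling dma_unmap_single/page() itself.
	 */
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
			DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP);
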
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ea0ee81..3d7d705 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -253,7 +253,7 @@
 	/* move myself to free_list */
 	list_move(&desc->desc_node, &atchan->free_list);
 
-	/* unmap dma addresses */
+	/* unmap dma addresses (not on slave channels) */
 	if (!atchan->chan_common.private) {
 		struct device *parent = chan2parent(&atchan->chan_common);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -583,7 +583,6 @@
 		desc->lli.ctrlb = ctrlb;
 
 		desc->txd.cookie = 0;
-		async_tx_ack(&desc->txd);
 
 		if (!first) {
 			first = desc;
@@ -604,7 +603,7 @@
 	/* set end-of-link on the last link descriptor of the list */
 	set_desc_eol(desc);
 
-	desc->txd.flags = flags; /* client is in control of this ack */
+	first->txd.flags = flags; /* client is in control of this ack */
 
 	return &first->txd;
 
@@ -670,7 +669,7 @@
 			if (!desc)
 				goto err_desc_get;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 			mem_width = 2;
 			if (unlikely(mem & 3 || len & 3))
@@ -712,7 +711,7 @@
 			if (!desc)
 				goto err_desc_get;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 			mem_width = 2;
 			if (unlikely(mem & 3 || len & 3))
@@ -749,8 +748,8 @@
 	first->txd.cookie = -EBUSY;
 	first->len = total_len;
 
-	/* last link descriptor of list is responsible of flags */
-	prev->txd.flags = flags; /* client is in control of this ack */
+	/* first link descriptor of the list is responsible for flags */
+	first->txd.flags = flags; /* client is in control of this ack */
 
 	return &first->txd;
 
@@ -854,11 +853,11 @@
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
+	spin_lock_bh(&atchan->lock);
 	if (!atc_chan_is_enabled(atchan)) {
-		spin_lock_bh(&atchan->lock);
 		atc_advance_work(atchan);
-		spin_unlock_bh(&atchan->lock);
 	}
+	spin_unlock_bh(&atchan->lock);
 }
 
 /**
@@ -1210,7 +1209,7 @@
 {
 	return platform_driver_probe(&at_dma_driver, at_dma_probe);
 }
-module_init(at_dma_init);
+subsys_initcall(at_dma_init);
 
 static void __exit at_dma_exit(void)
 {
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e5e172d2..4de947a 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1,7 +1,7 @@
 /*
  * Freescale MPC85xx, MPC83xx DMA Engine support
  *
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author:
  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
@@ -1324,6 +1324,8 @@
 	fdev->common.device_control = fsl_dma_device_control;
 	fdev->common.dev = &op->dev;
 
+	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+
 	dev_set_drvdata(&op->dev, fdev);
 
 	/*
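
The fsldma hunk above raises the device's DMA mask to 36 bits for the
MPC85xx parts' extended physical addressing. dma_set_mask() can fail and
returns an errno; a hedged sketch of the same call with the return value
checked (the warning text is illustrative, not from the patch):

	/* Assumed variant with error handling; the patch ignores the
	 * return value, which is common when the mask is known-good. */
	if (dma_set_mask(&op->dev, DMA_BIT_MASK(36)))
		dev_warn(&op->dev, "unable to set 36-bit DMA mask\n");
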
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 7826638..798f46a 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -664,11 +664,20 @@
 	/*calculate CTL_LO*/
 	ctl_lo.ctl_lo = 0;
 	ctl_lo.ctlx.int_en = 1;
-	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
-	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
 	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
 	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
 
+	/*
+	 * Here we need some translation from "enum dma_slave_buswidth"
+	 * to the format for our dma controller
+	 *		standard	intel_mid_dmac's format
+	 *		 1 Byte			0b000
+	 *		 2 Bytes		0b001
+	 *		 4 Bytes		0b010
+	 */
+	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
+	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
+
 	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
 		ctl_lo.ctlx.tt_fc = 0;
 		ctl_lo.ctlx.sinc = 0;
@@ -746,8 +755,18 @@
 	BUG_ON(!mids);
 
 	if (!midc->dma->pimr_mask) {
-		pr_debug("MDMA: SG list is not supported by this controller\n");
-		return  NULL;
+		/* We can still handle sg list with only one item */
+		if (sg_len == 1) {
+			txd = intel_mid_dma_prep_memcpy(chan,
+						mids->dma_slave.dst_addr,
+						mids->dma_slave.src_addr,
+						sgl->length,
+						flags);
+			return txd;
+		} else {
+			pr_warn("MDMA: SG list is not supported by this controller\n");
+			return  NULL;
+		}
 	}
 
 	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
@@ -758,6 +777,7 @@
 		pr_err("MDMA: Prep memcpy failed\n");
 		return NULL;
 	}
+
 	desc = to_intel_mid_dma_desc(txd);
 	desc->dirn = direction;
 	ctl_lo.ctl_lo = desc->ctl_lo;
@@ -1021,11 +1041,6 @@
 
 	/*DMA Interrupt*/
 	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
-	if (!mid) {
-		pr_err("ERR_MDMA:null pointer mid\n");
-		return -EINVAL;
-	}
-
 	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
 	tfr_status &= mid->intr_mask;
 	if (tfr_status) {
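
On the bus-width translation above: the division by two works because the
generic enum encodes widths in bytes (DMA_SLAVE_BUSWIDTH_1_BYTE == 1,
_2_BYTES == 2, _4_BYTES == 4), which integer-divide down to the
controller's 0b000/0b001/0b010 encodings. A hedged helper making that
explicit (the function is illustrative, not part of the patch):

	static u32 buswidth_to_midc(enum dma_slave_buswidth width)
	{
		/* 1 -> 0b000, 2 -> 0b001, 4 -> 0b010; this shortcut
		 * only holds for 1/2/4-byte widths, wider ones would
		 * need a real lookup table. */
		return width / 2;
	}
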
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 161c452..c6b01f5 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1261,7 +1261,7 @@
 	return err;
 }
 
-#ifdef CONFIG_MD_RAID6_PQ
+#ifdef CONFIG_RAID6_PQ
 static int __devinit
 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 {
@@ -1584,7 +1584,7 @@
 
 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
 	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
-		#ifdef CONFIG_MD_RAID6_PQ
+		#ifdef CONFIG_RAID6_PQ
 		ret = iop_adma_pq_zero_sum_self_test(adev);
 		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
 		#else
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c064c89..1c38418 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,6 +1,7 @@
 /*
  * Topcliff PCH DMA controller driver
  * Copyright (c) 2010 Intel Corporation
+ * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -921,12 +922,19 @@
 }
 
 /* PCI Device ID of DMA device */
-#define PCI_DEVICE_ID_PCH_DMA_8CH        0x8810
-#define PCI_DEVICE_ID_PCH_DMA_4CH        0x8815
+#define PCI_VENDOR_ID_ROHM             0x10DB
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH        0x8810
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH        0x8815
+#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
+#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
+#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
 
 static const struct pci_device_id pch_dma_id_table[] = {
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
 	{ 0, },
 };
 
@@ -954,6 +962,7 @@
 module_init(pch_dma_init);
 module_exit(pch_dma_exit);
 
-MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
+MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+		   "DMA controller driver");
 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fab68a5..6e1d46a 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) ST-Ericsson SA 2007-2010
+ * Copyright (C) Ericsson AB 2007-2008
+ * Copyright (C) ST-Ericsson SA 2008-2010
  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@
 	return d;
 }
 
-/* Support functions for logical channels */
+static int d40_psize_2_burst_size(bool is_log, int psize)
+{
+	if (is_log) {
+		if (psize == STEDMA40_PSIZE_LOG_1)
+			return 1;
+	} else {
+		if (psize == STEDMA40_PSIZE_PHY_1)
+			return 1;
+	}
 
+	return 2 << psize;
+}
+
+/*
+ * The DMA hardware only supports transmitting chunks of at most
+ * STEDMA40_MAX_SEG_SIZE << data_width bytes at a time. Calculate the
+ * total number of DMA elements required to send the entire sg list.
+ */
+static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
+{
+	int dmalen;
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (!IS_ALIGNED(size, 1 << max_w))
+		return -EINVAL;
+
+	if (size <= seg_max)
+		dmalen = 1;
+	else {
+		dmalen = size / seg_max;
+		if (dmalen * seg_max < size)
+			dmalen++;
+	}
+	return dmalen;
+}
+
+static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
+			   u32 data_width1, u32 data_width2)
+{
+	struct scatterlist *sg;
+	int i;
+	int len = 0;
+	int ret;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		ret = d40_size_2_dmalen(sg_dma_len(sg),
+					data_width1, data_width2);
+		if (ret < 0)
+			return ret;
+		len += ret;
+	}
+	return len;
+}
+
+/* Support functions for logical channels */
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
@@ -1241,6 +1300,21 @@
 		res = -EINVAL;
 	}
 
+	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
+	    (1 << conf->src_info.data_width) !=
+	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
+	    (1 << conf->dst_info.data_width)) {
+		/*
+		 * The DMAC hardware only supports
+		 * src (burst x width) == dst (burst x width)
+		 */
+
+		dev_err(&d40c->chan.dev->device,
+			"[%s] src (burst x width) != dst (burst x width)\n",
+			__func__);
+		res = -EINVAL;
+	}
+
 	return res;
 }
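
A worked example of the burst-times-width symmetry enforced above (the
numbers are illustrative, not taken from the driver): a source burst of 8
elements at 2 bytes per element moves 16 bytes per burst, so the
destination must move 16 bytes per burst too, e.g. 4 elements of 4 bytes.

	/* Sketch: data_width is a log2 byte count, as elsewhere in the
	 * driver, so bytes per burst = burst * (1 << data_width). */
	int src_bytes = 8 * (1 << 1);	/* burst 8 x 2-byte width = 16 */
	int dst_bytes = 4 * (1 << 2);	/* burst 4 x 4-byte width = 16 */
	/* src_bytes == dst_bytes, so this pairing would pass the check */
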
 
@@ -1638,13 +1712,21 @@
 	if (d40d == NULL)
 		goto err;
 
-	d40d->lli_len = sgl_len;
+	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 	d40d->lli_current = 0;
 	d40d->txd.flags = dma_flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1654,15 +1736,17 @@
 					 sgl_len,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
-					 d40c->dma_cfg.src_info.data_width);
+					 d40c->dma_cfg.src_info.data_width,
+					 d40c->dma_cfg.dst_info.data_width);
 
 		(void) d40_log_sg_to_lli(sgl_dst,
 					 sgl_len,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
-					 d40c->dma_cfg.dst_info.data_width);
+					 d40c->dma_cfg.dst_info.data_width,
+					 d40c->dma_cfg.src_info.data_width);
 	} else {
-		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1675,6 +1759,7 @@
 					virt_to_phys(d40d->lli_phy.src),
 					d40c->src_def_cfg,
 					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width,
 					d40c->dma_cfg.src_info.psize);
 
 		if (res < 0)
@@ -1687,6 +1772,7 @@
 					virt_to_phys(d40d->lli_phy.dst),
 					d40c->dst_def_cfg,
 					d40c->dma_cfg.dst_info.data_width,
+					d40c->dma_cfg.src_info.data_width,
 					d40c->dma_cfg.dst_info.psize);
 
 		if (res < 0)
@@ -1826,7 +1912,6 @@
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
 	unsigned long flags;
-	int err = 0;
 
 	if (d40c->phy_chan == NULL) {
 		dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@
 	}
 
 	d40d->txd.flags = dma_flags;
+	d40d->lli_len = d40_size_2_dmalen(size,
+					  d40c->dma_cfg.src_info.data_width,
+					  d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 
 	dma_async_tx_descriptor_init(&d40d->txd, chan);
 
@@ -1851,37 +1945,40 @@
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
-		d40d->lli_len = 1;
 		d40d->lli_current = 0;
 
-		d40_log_fill_lli(d40d->lli_log.src,
-				 src,
-				 size,
-				 d40c->log_def.lcsp1,
-				 d40c->dma_cfg.src_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.src,
+				       src,
+				       size,
+				       d40c->log_def.lcsp1,
+				       d40c->dma_cfg.src_info.data_width,
+				       d40c->dma_cfg.dst_info.data_width,
+				       true) == NULL)
+			goto err;
 
-		d40_log_fill_lli(d40d->lli_log.dst,
-				 dst,
-				 size,
-				 d40c->log_def.lcsp3,
-				 d40c->dma_cfg.dst_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.dst,
+				       dst,
+				       size,
+				       d40c->log_def.lcsp3,
+				       d40c->dma_cfg.dst_info.data_width,
+				       d40c->dma_cfg.src_info.data_width,
+				       true) == NULL)
+			goto err;
 
 	} else {
 
-		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
 
-		err = d40_phy_fill_lli(d40d->lli_phy.src,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.src,
 				       src,
 				       size,
 				       d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@
 				       d40c->src_def_cfg,
 				       true,
 				       d40c->dma_cfg.src_info.data_width,
-				       false);
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.dst_info.data_width,
+				       false) == NULL)
+			goto err;
 
-		err = d40_phy_fill_lli(d40d->lli_phy.dst,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
 				       dst,
 				       size,
 				       d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@
 				       d40c->dst_def_cfg,
 				       true,
 				       d40c->dma_cfg.dst_info.data_width,
-				       false);
-
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.src_info.data_width,
+				       false) == NULL)
+			goto err;
 
 		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
 				      d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return &d40d->txd;
 
-err_fill_lli:
-	dev_err(&d40c->chan.dev->device,
-		"[%s] Failed filling in PHY LLI\n", __func__);
 err:
 	if (d40d)
 		d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@
 	dma_addr_t dev_addr = 0;
 	int total_size;
 
-	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sg_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@
 	dma_addr_t dst_dev_addr;
 	int res;
 
-	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sgl_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@
 				virt_to_phys(d40d->lli_phy.src),
 				d40c->src_def_cfg,
 				d40c->dma_cfg.src_info.data_width,
+				d40c->dma_cfg.dst_info.data_width,
 				d40c->dma_cfg.src_info.psize);
 	if (res < 0)
 		return res;
@@ -2035,6 +2145,7 @@
 				virt_to_phys(d40d->lli_phy.dst),
 				d40c->dst_def_cfg,
 				d40c->dma_cfg.dst_info.data_width,
+				d40c->dma_cfg.src_info.data_width,
 				d40c->dma_cfg.dst_info.psize);
 	if (res < 0)
 		return res;
@@ -2244,6 +2355,8 @@
 			psize = STEDMA40_PSIZE_PHY_8;
 		else if (config_maxburst >= 4)
 			psize = STEDMA40_PSIZE_PHY_4;
+		else if (config_maxburst >= 2)
+			psize = STEDMA40_PSIZE_PHY_2;
 		else
 			psize = STEDMA40_PSIZE_PHY_1;
 	}
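
To make the new splitting arithmetic concrete: assuming
STEDMA40_MAX_SEG_SIZE is 0xffff, a transfer between a 2-byte-wide source
(log2 width 1) and a 4-byte-wide destination (log2 width 2) gives
seg_max = ALIGN(0xffff << 1, 1 << 2) - (1 << 2) = 131068 bytes per LLI.
A hedged usage sketch (the buffer size is illustrative):

	/* A 300000-byte, 4-byte-aligned buffer then needs three LLIs:
	 * 131068 + 131068 + 37864 bytes. */
	int dmalen = d40_size_2_dmalen(300000, 1, 2);	/* == 3 */
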
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 8557cb8..0b096a3 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
  */
@@ -122,15 +122,15 @@
 	*dst_cfg = dst;
 }
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
-		     dma_addr_t data,
-		     u32 data_size,
-		     int psize,
-		     dma_addr_t next_lli,
-		     u32 reg_cfg,
-		     bool term_int,
-		     u32 data_width,
-		     bool is_device)
+static int d40_phy_fill_lli(struct d40_phy_lli *lli,
+			    dma_addr_t data,
+			    u32 data_size,
+			    int psize,
+			    dma_addr_t next_lli,
+			    u32 reg_cfg,
+			    bool term_int,
+			    u32 data_width,
+			    bool is_device)
 {
 	int num_elems;
 
@@ -139,13 +139,6 @@
 	else
 		num_elems = 2 << psize;
 
-	/*
-	 * Size is 16bit. data_width is 8, 16, 32 or 64 bit
-	 * Block large than 64 KiB must be split.
-	 */
-	if (data_size > (0xffff << data_width))
-		return -EINVAL;
-
 	/* Must be aligned */
 	if (!IS_ALIGNED(data, 0x1 << data_width))
 		return -EINVAL;
@@ -187,55 +180,118 @@
 	return 0;
 }
 
+static int d40_seg_size(int size, int data_width1, int data_width2)
+{
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (size <= seg_max)
+		return size;
+
+	if (size <= 2 * seg_max)
+		return ALIGN(size / 2, 1 << max_w);
+
+	return seg_max;
+}
+
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t addr,
+				       u32 size,
+				       int psize,
+				       dma_addr_t lli_phys,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device)
+{
+	int err;
+	dma_addr_t next = lli_phys;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		if (term_int && size_rest == 0)
+			next = 0;
+		else
+			next = ALIGN(next + sizeof(struct d40_phy_lli),
+				     D40_LLI_ALIGN);
+
+		err = d40_phy_fill_lli(lli,
+				       addr,
+				       size_seg,
+				       psize,
+				       next,
+				       reg_cfg,
+				       !next,
+				       data_width1,
+				       is_device);
+
+		if (err)
+			goto err;
+
+		lli++;
+		if (!is_device)
+			addr += size_seg;
+	} while (size_rest);
+
+	return lli;
+
+ err:
+	return NULL;
+}
+
 int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      dma_addr_t target,
-		      struct d40_phy_lli *lli,
+		      struct d40_phy_lli *lli_sg,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize)
 {
 	int total_size = 0;
 	int i;
 	struct scatterlist *current_sg = sg;
-	dma_addr_t next_lli_phys;
 	dma_addr_t dst;
-	int err = 0;
+	struct d40_phy_lli *lli = lli_sg;
+	dma_addr_t l_phys = lli_phys;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 
 		total_size += sg_dma_len(current_sg);
 
-		/* If this scatter list entry is the last one, no next link */
-		if (sg_len - 1 == i)
-			next_lli_phys = 0;
-		else
-			next_lli_phys = ALIGN(lli_phys + (i + 1) *
-					      sizeof(struct d40_phy_lli),
-					      D40_LLI_ALIGN);
-
 		if (target)
 			dst = target;
 		else
 			dst = sg_phys(current_sg);
 
-		err = d40_phy_fill_lli(&lli[i],
-				       dst,
-				       sg_dma_len(current_sg),
-				       psize,
-				       next_lli_phys,
-				       reg_cfg,
-				       !next_lli_phys,
-				       data_width,
-				       target == dst);
-		if (err)
-			goto err;
+		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
+			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
+
+		lli = d40_phy_buf_to_lli(lli,
+					 dst,
+					 sg_dma_len(current_sg),
+					 psize,
+					 l_phys,
+					 reg_cfg,
+					 sg_len - 1 == i,
+					 data_width1,
+					 data_width2,
+					 target == dst);
+		if (lli == NULL)
+			return -EINVAL;
 	}
 
 	return total_size;
-err:
-	return err;
 }
 
 
@@ -315,17 +371,20 @@
 	writel(lli_dst->lcsp13, &lcla[1].lcsp13);
 }
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
-		      dma_addr_t data, u32 data_size,
-		      u32 reg_cfg,
-		      u32 data_width,
-		      bool addr_inc)
+static void d40_log_fill_lli(struct d40_log_lli *lli,
+			     dma_addr_t data, u32 data_size,
+			     u32 reg_cfg,
+			     u32 data_width,
+			     bool addr_inc)
 {
 	lli->lcsp13 = reg_cfg;
 
 	/* The number of elements to transfer */
 	lli->lcsp02 = ((data_size >> data_width) <<
 		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+
+	BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+
 	/* 16 LSBs address of the current element */
 	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
 	/* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli_src = lli->src;
+	struct d40_log_lli *lli_dst = lli->dst;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
 
 		if (direction == DMA_TO_DEVICE) {
-			d40_log_fill_lli(&lli->src[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 true);
-			d40_log_fill_lli(&lli->dst[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 false);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   true);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   false);
 		} else {
-			d40_log_fill_lli(&lli->dst[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 true);
-			d40_log_fill_lli(&lli->src[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 false);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   true);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   false);
 		}
 	}
 	return total_size;
 }
 
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst */
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool addr_inc)
+{
+	struct d40_log_lli *lli = lli_sg;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		d40_log_fill_lli(lli,
+				 addr,
+				 size_seg,
+				 lcsp13, data_width1,
+				 addr_inc);
+		if (addr_inc)
+			addr += size_seg;
+		lli++;
+	} while (size_rest);
+
+	return lli;
+}
+
 int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width)
+		      u32 data_width1, u32 data_width2)
 {
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli = lli_sg;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
-
-		d40_log_fill_lli(&lli_sg[i],
-				 sg_phys(current_sg),
-				 sg_dma_len(current_sg),
-				 lcsp13, data_width,
-				 true);
+		lli = d40_log_buf_to_lli(lli,
+					 sg_phys(current_sg),
+					 sg_dma_len(current_sg),
+					 lcsp13,
+					 data_width1, data_width2, true);
 	}
 	return total_size;
 }
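
One subtlety in d40_seg_size() above: when the remainder fits in two
segments it is split evenly (rounded to the wider data width) rather than
as seg_max plus a small tail. A worked sketch under the same assumptions
as before (STEDMA40_MAX_SEG_SIZE == 0xffff, log2 widths 1 and 2):

	/* seg_max is 131068 here; 200000 <= 2 * seg_max, so the first
	 * call yields ALIGN(200000 / 2, 4) == 100000 and the loop in
	 * d40_phy_buf_to_lli()/d40_log_buf_to_lli() emits two balanced
	 * 100000-byte elements instead of 131068 + 68932. */
	int first_seg = d40_seg_size(200000, 1, 2);	/* == 100000 */
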
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9e419b9..9cc4349 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -292,18 +292,20 @@
 		      struct d40_phy_lli *lli,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize);
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
-		     dma_addr_t data,
-		     u32 data_size,
-		     int psize,
-		     dma_addr_t next_lli,
-		     u32 reg_cfg,
-		     bool term_int,
-		     u32 data_width,
-		     bool is_device);
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t data,
+				       u32 data_size,
+				       int psize,
+				       dma_addr_t next_lli,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device);
 
 void d40_phy_lli_write(void __iomem *virtbase,
 		       u32 phy_chan_num,
@@ -312,12 +314,12 @@
 
 /* Logical channels */
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
-		      dma_addr_t data,
-		      u32 data_size,
-		      u32 reg_cfg,
-		      u32 data_width,
-		      bool addr_inc);
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst */
+				       u32 data_width1, u32 data_width2,
+				       bool addr_inc);
 
 int d40_log_sg_to_dev(struct scatterlist *sg,
 		      int sg_len,
@@ -332,7 +334,7 @@
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width);
+		      u32 data_width1, u32 data_width2);
 
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0307d60..5c4f9b9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,25 +607,6 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
-void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
-{
-	info->fix.type = FB_TYPE_PACKED_PIXELS;
-	info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
-		FB_VISUAL_TRUECOLOR;
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
-	info->fix.type_aux = 0;
-	info->fix.xpanstep = 1; /* doing it in hw */
-	info->fix.ypanstep = 1; /* doing it in hw */
-	info->fix.ywrapstep = 0;
-	info->fix.accel = FB_ACCEL_NONE;
-	info->fix.type_aux = 0;
-
-	info->fix.line_length = fb->pitch;
-	return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -835,7 +816,6 @@
 			mutex_unlock(&dev->mode_config.mutex);
 			return ret;
 		}
-		drm_fb_helper_fill_fix(info, fb_helper->fb);
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
@@ -973,7 +953,6 @@
 
 	if (new_fb) {
 		info->var.pixclock = 0;
-		drm_fb_helper_fill_fix(info, fb_helper->fb);
 		if (register_framebuffer(info) < 0) {
 			return -EINVAL;
 		}
@@ -1000,6 +979,26 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+			    uint32_t depth)
+{
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.type_aux = 0;
+
+	info->fix.line_length = pitch;
+	return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height)
 {
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ee145a2..5127827 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,6 +148,7 @@
 
 //	memset(info->screen_base, 0, size);
 
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	info->pixmap.size = 64*1024;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 46e3257..01bffc4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -160,6 +160,7 @@
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
 #define NVOBJ_FLAG_VM			(1 << 3)
+#define NVOBJ_FLAG_VM_USER		(1 << 4)
 
 #define NVOBJ_CINST_GLOBAL	0xdeadbeef
 
@@ -1576,6 +1577,20 @@
 		dev->pdev->subsystem_device == sub_device;
 }
 
+/* returns 1 if the device is one of the nv4x chipsets using the 0x4497
+ * object class, which is helpful for determining a number of other
+ * hardware features
+ */
+static inline int
+nv44_graph_class(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if ((dev_priv->chipset & 0xf0) == 0x60)
+		return 1;
+
+	return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO  1
 #define NV_MEM_ACCESS_WO  2
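
Decoding the 0x0baf mask in nv44_graph_class() (the chipset list below is
inferred by expanding the bit test, not quoted from the source): the helper
returns 1 exactly when the chipset's low nibble has its bit clear in the
mask, plus the whole 0x6x range via the first test.

	static inline bool nibble_is_nv44_class(int n)	/* illustrative */
	{
		/* 0x0baf has bits 0-3, 5, 7-9 and 11 set, so this is
		 * true for n = 4, 6, 0xa and 0xc-0xf, i.e. chipsets
		 * 0x44, 0x46, 0x4a and 0x4c-0x4f. */
		return !(0x0baf & (1 << n));
	}
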
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a26d047..60769d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,13 +352,14 @@
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &nouveau_fbcon_sw_ops;
-	info->fix.smem_start = dev->mode_config.fb_base +
-			       (nvbo->bo.mem.start << PAGE_SHIFT);
+	info->fix.smem_start = nvbo->bo.mem.bus.base +
+			       nvbo->bo.mem.bus.offset;
 	info->fix.smem_len = size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
 	info->screen_size = size;
 
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* Set aperture base/size for vesafb takeover */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 69044eb..26347b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -742,30 +742,24 @@
 {
 	struct nouveau_mm *mm = man->priv;
 	struct nouveau_mm_node *r;
-	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
-	int i;
+	u32 total = 0, free = 0;
 
 	mutex_lock(&mm->mutex);
 	list_for_each_entry(r, &mm->nodes, nl_entry) {
-		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
-		       prefix, r->free ? "free" : "used", r->type,
-		       ((u64)r->offset << 12),
+		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+		       prefix, r->type, ((u64)r->offset << 12),
 		       (((u64)r->offset + r->length) << 12));
+
 		total += r->length;
-		ttotal[r->type] += r->length;
-		if (r->free)
-			tfree[r->type] += r->length;
-		else
-			tused[r->type] += r->length;
+		if (!r->type)
+			free += r->length;
 	}
 	mutex_unlock(&mm->mutex);
 
-	printk(KERN_DEBUG "%s  total: 0x%010llx\n", prefix, total << 12);
-	for (i = 0; i < 3; i++) {
-		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
-				  "used 0x%010llx, free 0x%010llx\n", prefix,
-		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
-	}
+	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
+	       prefix, (u64)total << 12, (u64)free << 12);
+	printk(KERN_DEBUG "%s  block: 0x%08x\n",
+	       prefix, mm->block_size << 12);
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11e..8844b50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,175 +48,76 @@
 
 	b->offset = a->offset;
 	b->length = size;
-	b->free   = a->free;
 	b->type   = a->type;
 	a->offset += size;
 	a->length -= size;
 	list_add_tail(&b->nl_entry, &a->nl_entry);
-	if (b->free)
+	if (b->type == 0)
 		list_add_tail(&b->fl_entry, &a->fl_entry);
 	return b;
 }
 
-static struct nouveau_mm_node *
-nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
-{
-	struct nouveau_mm_node *prev, *next;
-
-	/* try to merge with free adjacent entries of same type */
-	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.prev != &rmm->nodes) {
-		if (prev->free && prev->type == this->type) {
-			prev->length += this->length;
-			region_put(rmm, this);
-			this = prev;
-		}
-	}
-
-	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.next != &rmm->nodes) {
-		if (next->free && next->type == this->type) {
-			next->offset  = this->offset;
-			next->length += this->length;
-			region_put(rmm, this);
-			this = next;
-		}
-	}
-
-	return this;
-}
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
 nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
 {
-	u32 block_s, block_l;
+	struct nouveau_mm_node *prev = node(this, prev);
+	struct nouveau_mm_node *next = node(this, next);
 
-	this->free = true;
 	list_add(&this->fl_entry, &rmm->free);
-	this = nouveau_mm_merge(rmm, this);
-
-	/* any entirely free blocks now?  we'll want to remove typing
-	 * on them now so they can be use for any memory allocation
-	 */
-	block_s = roundup(this->offset, rmm->block_size);
-	if (block_s + rmm->block_size > this->offset + this->length)
-		return;
-
-	/* split off any still-typed region at the start */
-	if (block_s != this->offset) {
-		if (!region_split(rmm, this, block_s - this->offset))
-			return;
-	}
-
-	/* split off the soon-to-be-untyped block(s) */
-	block_l = rounddown(this->length, rmm->block_size);
-	if (block_l != this->length) {
-		this = region_split(rmm, this, block_l);
-		if (!this)
-			return;
-	}
-
-	/* mark as having no type, and retry merge with any adjacent
-	 * untyped blocks
-	 */
 	this->type = 0;
-	nouveau_mm_merge(rmm, this);
+
+	if (prev && prev->type == 0) {
+		prev->length += this->length;
+		region_put(rmm, this);
+		this = prev;
+	}
+
+	if (next && next->type == 0) {
+		next->offset  = this->offset;
+		next->length += this->length;
+		region_put(rmm, this);
+	}
 }
 
 int
 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
-	struct nouveau_mm_node *this, *tmp, *next;
-	u32 splitoff, avail, alloc;
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 min = size_nc ? size_nc : size;
+	u32 align_mask = align - 1;
+	u32 splitoff;
+	u32 s, e;
 
-	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
-		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-		if (this->nl_entry.next == &rmm->nodes)
-			next = NULL;
+	list_for_each_entry(this, &rmm->free, fl_entry) {
+		e = this->offset + this->length;
+		s = this->offset;
 
-		/* skip wrongly typed blocks */
-		if (this->type && this->type != type)
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, rmm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type)
+			e = rounddown(e, rmm->block_size);
+
+		s  = (s + align_mask) & ~align_mask;
+		e &= ~align_mask;
+		if (s > e || e - s < min)
 			continue;
 
-		/* account for alignment */
-		splitoff = this->offset & (align - 1);
-		if (splitoff)
-			splitoff = align - splitoff;
-
-		if (this->length <= splitoff)
-			continue;
-
-		/* determine total memory available from this, and
-		 * the next block (if appropriate)
-		 */
-		avail = this->length;
-		if (next && next->free && (!next->type || next->type == type))
-			avail += next->length;
-
-		avail -= splitoff;
-
-		/* determine allocation size */
-		if (size_nc) {
-			alloc = min(avail, size);
-			alloc = rounddown(alloc, size_nc);
-			if (alloc == 0)
-				continue;
-		} else {
-			alloc = size;
-			if (avail < alloc)
-				continue;
-		}
-
-		/* untyped block, split off a chunk that's a multiple
-		 * of block_size and type it
-		 */
-		if (!this->type) {
-			u32 block = roundup(alloc + splitoff, rmm->block_size);
-			if (this->length < block)
-				continue;
-
-			this = region_split(rmm, this, block);
-			if (!this)
-				return -ENOMEM;
-
-			this->type = type;
-		}
-
-		/* stealing memory from adjacent block */
-		if (alloc > this->length) {
-			u32 amount = alloc - (this->length - splitoff);
-
-			if (!next->type) {
-				amount = roundup(amount, rmm->block_size);
-
-				next = region_split(rmm, next, amount);
-				if (!next)
-					return -ENOMEM;
-
-				next->type = type;
-			}
-
-			this->length += amount;
-			next->offset += amount;
-			next->length -= amount;
-			if (!next->length) {
-				list_del(&next->nl_entry);
-				list_del(&next->fl_entry);
-				kfree(next);
-			}
-		}
-
-		if (splitoff) {
-			if (!region_split(rmm, this, splitoff))
-				return -ENOMEM;
-		}
-
-		this = region_split(rmm, this, alloc);
-		if (this == NULL)
+		splitoff = s - this->offset;
+		if (splitoff && !region_split(rmm, this, splitoff))
 			return -ENOMEM;
 
-		this->free = false;
+		this = region_split(rmm, this, min(size, e - s));
+		if (!this)
+			return -ENOMEM;
+
+		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
 		return 0;
@@ -234,7 +135,6 @@
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return -ENOMEM;
-	heap->free = true;
 	heap->offset = roundup(offset, block);
 	heap->length = rounddown(offset + length, block) - heap->offset;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index af38449..798eaf3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -30,9 +30,7 @@
 	struct list_head fl_entry;
 	struct list_head rl_entry;
 
-	bool free;
-	int  type;
-
+	u8  type;
 	u32 offset;
 	u32 length;
 };
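
With the free flag gone, type == 0 now doubles as the free marker
throughout the allocator. A hedged usage sketch of the simplified API
(mm, type, size and align are placeholders, and the units are assumed to
be the mm's native block units rather than bytes):

	struct nouveau_mm_node *node;
	int ret;

	/* Request 'size' units of memory of the given type at the given
	 * alignment; on success node->type == type. */
	ret = nouveau_mm_get(mm, type, size, 0, align, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);	/* merges back into free space */
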
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 19ef92a..8870d72 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -451,8 +451,7 @@
 	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
 
 	/* curie */
-	if (dev_priv->chipset >= 0x60 ||
-	    0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+	if (nv44_graph_class(dev))
 		NVOBJ_CLASS(dev, 0x4497, GR);
 	else
 		NVOBJ_CLASS(dev, 0x4097, GR);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce58509..f70447d 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
  */
 
 static int
-nv40_graph_4097(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if ((dev_priv->chipset & 0xf0) == 0x60)
-		return 0;
-
-	return !!(0x0baf & (1 << dev_priv->chipset));
-}
-
-static int
 nv40_graph_vs_count(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@
 		gr_def(ctx, 0x4009dc, 0x80000000);
 	} else {
 		cp_ctx(ctx, 0x400840, 20);
-		if (!nv40_graph_4097(ctx->dev)) {
+		if (nv44_graph_class(ctx->dev)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
 		}
@@ -228,7 +217,7 @@
 		gr_def(ctx, 0x400888, 0x00000040);
 		cp_ctx(ctx, 0x400894, 11);
 		gr_def(ctx, 0x400894, 0x00000040);
-		if (nv40_graph_4097(ctx->dev)) {
+		if (!nv44_graph_class(ctx->dev)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
 		}
@@ -546,7 +535,7 @@
 static void
 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 {
-	int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+	int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
 
 	cp_out (ctx, 0x300000);
 	cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@
 	} else {
 		b0_offset = 0x1d40/4; /* 2200 */
 		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
-		vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+		vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
 	}
 
 	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
-	cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+	cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
 
 	offset = ctx->ctxvals_pos;
 	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c1..03c0d4c 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
 int
 nv40_mc_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t tmp;
-
 	/* Power up everything; resetting each individual unit will
 	 * be done later if needed.
 	 */
 	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
 
-	switch (dev_priv->chipset) {
-	case 0x44:
-	case 0x46: /* G72 */
-	case 0x4e:
-	case 0x4c: /* C51_G7X */
-		tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+	if (nv44_graph_class(dev)) {
+		u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		nv_wr32(dev, NV40_PMC_1700, tmp);
 		nv_wr32(dev, NV40_PMC_1704, 0);
 		nv_wr32(dev, NV40_PMC_1708, 0);
 		nv_wr32(dev, NV40_PMC_170C, tmp);
-		break;
-	default:
-		break;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 2e1b1cd..ea00418 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -332,8 +332,11 @@
 	gpuobj->vinst = node->vram->offset;
 
 	if (gpuobj->flags & NVOBJ_FLAG_VM) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
-				     NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+		u32 flags = NV_MEM_ACCESS_RW;
+		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+			flags |= NV_MEM_ACCESS_SYS;
+
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
 				     &node->chan_vma);
 		if (ret) {
 			vram->put(dev, &node->vram);
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5feacd5..e6ea7d8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -105,7 +105,8 @@
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
 				 &grch->unk418810);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 4b9251b..e4e83c2 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -48,8 +48,8 @@
 	phys >>= 8;
 
 	phys |= 0x00000001; /* present */
-//	if (vma->access & NV_MEM_ACCESS_SYS)
-//		phys |= 0x00000002;
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= 0x00000002;
 
 	phys |= ((u64)target  << 32);
 	phys |= ((u64)memtype << 36);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fe8ebd..a8973ac 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3002,31 +3002,6 @@
 	return 0;
 }
 
-static bool evergreen_card_posted(struct radeon_device *rdev)
-{
-	u32 reg;
-
-	/* first check CRTCs */
-	if (rdev->flags & RADEON_IS_IGP)
-		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	else
-		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-	if (reg & EVERGREEN_CRTC_MASTER_EN)
-		return true;
-
-	/* then check MEM_SIZE, in case the crtcs are off */
-	if (RREG32(CONFIG_MEMSIZE))
-		return true;
-
-	return false;
-}
-
 /* Plan is to move initialization into that function and use
  * helper functions so that radeon_device_init pretty much
  * does nothing more than call asic specific functions. This
@@ -3063,7 +3038,7 @@
 	if (radeon_asic_reset(rdev))
 		dev_warn(rdev->dev, "GPU reset failed !\n");
 	/* Post card if necessary */
-	if (!evergreen_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -3158,6 +3133,9 @@
 {
 	u32 link_width_cntl, speed_cntl;
 
+	if (radeon_pcie_gen2 == 0)
+		return;
+
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f637595..46da514 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2086,12 +2086,13 @@
 {
 	struct r100_mc_save save;
 	u32 status, tmp;
+	int ret = 0;
 
-	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -2131,11 +2132,11 @@
 		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
 	r100_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 void r100_set_common_regs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fae5e70..cf862ca 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -405,12 +405,13 @@
 {
 	struct r100_mc_save save;
 	u32 status, tmp;
+	int ret = 0;
 
-	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -451,11 +452,11 @@
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
 	r100_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6b50716..aca2236 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2358,24 +2358,6 @@
 	/* FIXME: implement */
 }
 
-
-bool r600_card_posted(struct radeon_device *rdev)
-{
-	uint32_t reg;
-
-	/* first check CRTCs */
-	reg = RREG32(D1CRTC_CONTROL) |
-		RREG32(D2CRTC_CONTROL);
-	if (reg & CRTC_EN)
-		return true;
-
-	/* then check MEM_SIZE, in case the crtcs are off */
-	if (RREG32(CONFIG_MEMSIZE))
-		return true;
-
-	return false;
-}
-
 int r600_startup(struct radeon_device *rdev)
 {
 	int r;
@@ -2536,7 +2518,7 @@
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -3658,6 +3640,9 @@
 	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
 	u16 link_cntl2;
 
+	if (radeon_pcie_gen2 == 0)
+		return;
+
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e948663..71d2a55 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -92,6 +92,7 @@
 extern int radeon_audio;
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index be5cb4f..d5680a0 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -104,6 +104,7 @@
 int radeon_audio = 1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -147,6 +148,9 @@
 MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
 module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
 
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ca32e9c..66324b5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,6 +225,8 @@
 
 	strcpy(info->fix.id, "radeondrmfb");
 
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &radeonfb_ops;
 
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index ac40fd3..9177f91 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -439,7 +439,7 @@
 0x000286EC SPI_COMPUTE_NUM_THREAD_X
 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
-0x000286F8 GDS_ADDR_SIZE
+0x00028724 GDS_ADDR_SIZE
 0x00028780 CB_BLEND0_CONTROL
 0x00028784 CB_BLEND1_CONTROL
 0x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b4192ac..5afe294 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -339,16 +339,16 @@
 
 int rs600_asic_reset(struct radeon_device *rdev)
 {
-	u32 status, tmp;
-
 	struct rv515_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
 
-	/* Stops all mc clients */
-	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -392,11 +392,11 @@
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
 	rv515_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a264aa..491dc90 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1268,7 +1268,7 @@
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -1372,6 +1372,9 @@
 	u32 link_width_cntl, lanes, speed_cntl, tmp;
 	u16 link_cntl2;
 
+	if (radeon_pcie_gen2 == 0)
+		return;
+
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c7db698..f0bd5bc 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -196,88 +196,60 @@
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->suspend ? pm->suspend(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_SUSPEND);
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_SUSPEND);
 }
 
 static int i2c_device_pm_resume(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
 
 	if (pm)
-		ret = pm->resume ? pm->resume(dev) : 0;
+		return pm_generic_resume(dev);
 	else
-		ret = i2c_legacy_resume(dev);
-
-	return ret;
+		return i2c_legacy_resume(dev);
 }
 
 static int i2c_device_pm_freeze(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->freeze ? pm->freeze(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_FREEZE);
+	if (pm)
+		return pm_generic_freeze(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_FREEZE);
 }
 
 static int i2c_device_pm_thaw(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->thaw ? pm->thaw(dev) : 0;
-	}
-
-	return i2c_legacy_resume(dev);
+	if (pm)
+		return pm_generic_thaw(dev);
+	else
+		return i2c_legacy_resume(dev);
 }
 
 static int i2c_device_pm_poweroff(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->poweroff ? pm->poweroff(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
+	if (pm)
+		return pm_generic_poweroff(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
 }
 
 static int i2c_device_pm_restore(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
 
 	if (pm)
-		ret = pm->restore ? pm->restore(dev) : 0;
+		return pm_generic_restore(dev);
 	else
-		ret = i2c_legacy_resume(dev);
-
-	if (!ret) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
-	return ret;
+		return i2c_legacy_resume(dev);
 }
 #else /* !CONFIG_PM_SLEEP */
 #define i2c_device_pm_suspend	NULL
@@ -1021,6 +993,14 @@
 static int __unregister_client(struct device *dev, void *dummy)
 {
 	struct i2c_client *client = i2c_verify_client(dev);
+	if (client && strcmp(client->name, "dummy"))
+		i2c_unregister_device(client);
+	return 0;
+}
+
+static int __unregister_dummy(struct device *dev, void *dummy)
+{
+	struct i2c_client *client = i2c_verify_client(dev);
 	if (client)
 		i2c_unregister_device(client);
 	return 0;
@@ -1075,8 +1055,12 @@
 	mutex_unlock(&adap->userspace_clients_lock);
 
 	/* Detach any active clients. This can't fail, thus we do not
-	   checking the returned value. */
+	 * check the returned value. This is a two-pass process, because
+	 * we can't remove the dummy devices during the first pass: they
+	 * could have been instantiated by real devices wishing to clean
+	 * them up properly, so we give them a chance to do that first. */
 	res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+	res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
 
 #ifdef CONFIG_I2C_COMPAT
 	class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
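
A detail worth noting for the two-pass sweep above: device_for_each_child() stops iterating as soon as the callback returns nonzero, so both helpers return 0 unconditionally to guarantee a full pass, and the ordering (real clients first, dummies second) is what lets the creator of a dummy device unregister it before the fallback pass reaps it. A condensed sketch of the mechanism, assuming the same i2c helpers; the single parameterized callback is illustrative, not the in-tree shape:

/* Illustrative two-pass sweep; the in-tree code uses two callbacks. */
static int unregister_pass(struct device *dev, void *data)
{
	struct i2c_client *client = i2c_verify_client(dev);
	bool dummies_only = *(bool *)data;

	/* pass 1: everything but "dummy"; pass 2: leftover dummies */
	if (client && dummies_only == !strcmp(client->name, "dummy"))
		i2c_unregister_device(client);
	return 0;	/* nonzero would abort device_for_each_child() */
}

/* caller: */
bool pass2 = false;
device_for_each_child(&adap->dev, &pass2, unregister_pass);
pass2 = true;
device_for_each_child(&adap->dev, &pass2, unregister_pass);
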
@@ -1140,6 +1124,14 @@
 	if (res)
 		return res;
 
+	/* Drivers should switch to dev_pm_ops instead. */
+	if (driver->suspend)
+		pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
+			driver->driver.name);
+	if (driver->resume)
+		pr_warn("i2c-core: driver [%s] using legacy resume method\n",
+			driver->driver.name);
+
 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
 	INIT_LIST_HEAD(&driver->clients);
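
The new warnings nudge i2c drivers away from the legacy i2c_driver .suspend/.resume fields and toward dev_pm_ops on the embedded device_driver, which is what the pm_generic_*() paths above dispatch to. A minimal sketch of a converted driver; the "foo" names are hypothetical:

static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "suspending\n");
	/* ... put the chip into its low-power state ... */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "resuming\n");
	/* ... restore register state ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume	 = foo_resume,
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* instead of i2c_driver .suspend/.resume */
	},
	/* .probe, .remove, .id_table as before */
};
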
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index dffa0ac..38e4eb1 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -350,6 +350,7 @@
 	if (!d->dm_dev.bdev)
 		return;
 
+	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
 	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
 	d->dm_dev.bdev = NULL;
 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cf8594c..b76cfc8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1912,6 +1912,7 @@
 		MD_BUG();
 		return;
 	}
+	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
 	list_del_rcu(&rdev->same_set);
 	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
 	rdev->mddev = NULL;
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index fa19d84..dd84124f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
  * your option) any later version.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,8 +21,12 @@
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/mmc/host.h>
+#ifdef CONFIG_PPC
 #include <asm/machdep.h>
+#endif
 #include "sdhci-of.h"
 #include "sdhci.h"
 
@@ -112,7 +117,11 @@
 		return true;
 
 	/* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
 	return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+	return false;
+#endif
 }
 
 static int __devinit sdhci_of_probe(struct platform_device *ofdev,
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index b1f7689..7741470 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -53,9 +53,10 @@
 	  devices. Partitioning on NFTL 'devices' is a different - that's the
 	  'normal' form of partitioning used on a block device.
 
+if MTD_PARTITIONS
+
 config MTD_REDBOOT_PARTS
 	tristate "RedBoot partition table parsing"
-	depends on MTD_PARTITIONS
 	---help---
 	  RedBoot is a ROM monitor and bootloader which deals with multiple
 	  'images' in flash devices by putting a table one of the erase
@@ -72,9 +73,10 @@
 	  SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
 	  example.
 
+if MTD_REDBOOT_PARTS
+
 config MTD_REDBOOT_DIRECTORY_BLOCK
 	int "Location of RedBoot partition table"
-	depends on MTD_REDBOOT_PARTS
 	default "-1"
 	---help---
 	  This option is the Linux counterpart to the
@@ -91,18 +93,18 @@
 
 config MTD_REDBOOT_PARTS_UNALLOCATED
 	bool "Include unallocated flash regions"
-	depends on MTD_REDBOOT_PARTS
 	help
 	  If you need to register each unallocated flash region as a MTD
 	  'partition', enable this option.
 
 config MTD_REDBOOT_PARTS_READONLY
 	bool "Force read-only for RedBoot system images"
-	depends on MTD_REDBOOT_PARTS
 	help
 	  If you need to force read-only for 'RedBoot', 'RedBoot Config' and
 	  'FIS directory' images, enable this option.
 
+endif # MTD_REDBOOT_PARTS
+
 config MTD_CMDLINE_PARTS
 	bool "Command line partition table parsing"
 	depends on MTD_PARTITIONS = "y" && MTD = "y"
@@ -142,7 +144,7 @@
 
 config MTD_AFS_PARTS
 	tristate "ARM Firmware Suite partition parsing"
-	depends on ARM && MTD_PARTITIONS
+	depends on ARM
 	---help---
 	  The ARM Firmware Suite allows the user to divide flash devices into
 	  multiple 'images'. Each such image has a header containing its name
@@ -158,8 +160,8 @@
 	  example.
 
 config MTD_OF_PARTS
-	tristate "Flash partition map based on OF description"
-	depends on OF && MTD_PARTITIONS
+	def_bool y
+	depends on OF
 	help
 	  This provides a partition parsing function which derives
 	  the partition map from the children of the flash node,
@@ -167,10 +169,11 @@
 
 config MTD_AR7_PARTS
 	tristate "TI AR7 partitioning support"
-	depends on MTD_PARTITIONS
 	---help---
 	  TI AR7 partitioning support
 
+endif # MTD_PARTITIONS
+
 comment "User Modules And Translation Layers"
 
 config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 760abc5..d4e7f25 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,13 +6,13 @@
 obj-$(CONFIG_MTD)		+= mtd.o
 mtd-y				:= mtdcore.o mtdsuper.o
 mtd-$(CONFIG_MTD_PARTITIONS)	+= mtdpart.o
+mtd-$(CONFIG_MTD_OF_PARTS)	+= ofpart.o
 
 obj-$(CONFIG_MTD_CONCAT)	+= mtdconcat.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS)	+= afs.o
 obj-$(CONFIG_MTD_AR7_PARTS)	+= ar7part.o
-obj-$(CONFIG_MTD_OF_PARTS)      += ofpart.o
 
 # 'Users' - code which presents functionality to userspace.
 obj-$(CONFIG_MTD_CHAR)		+= mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index ad9268b..a8c3e1c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -162,7 +162,7 @@
 #endif
 
 /* Atmel chips don't use the same PRI format as Intel chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -202,7 +202,7 @@
 	cfi->cfiq->BufWriteTimeoutMax = 0;
 }
 
-static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
+static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -214,7 +214,7 @@
 
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
-static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
+static void fixup_intel_strataflash(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -227,7 +227,7 @@
 #endif
 
 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
-static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
+static void fixup_no_write_suspend(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -240,7 +240,7 @@
 }
 #endif
 
-static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320ct(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -249,7 +249,7 @@
 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
 }
 
-static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320cb(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -259,7 +259,7 @@
 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
 };
 
-static void fixup_use_point(struct mtd_info *mtd, void *param)
+static void fixup_use_point(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	if (!mtd->point && map_is_linear(map)) {
@@ -268,7 +268,7 @@
 	}
 }
 
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@
 /*
  * Some chips power-up with all sectors locked by default.
  */
-static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -295,31 +295,31 @@
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
-	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
-	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
-	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
+	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
 #endif
 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
 #endif
 #if !FORCE_WORD_WRITE
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
 #endif
-	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
-	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
-	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
+	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
+	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+	{ 0, 0, NULL }
 };
 
 static struct cfi_fixup jedec_fixup_table[] = {
-	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
+	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
+	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
+	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
+	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
+	{ 0, 0, NULL }
 };
 static struct cfi_fixup fixup_table[] = {
 	/* The CFI vendor ids and the JEDEC vendor IDs appear
@@ -327,8 +327,8 @@
 	 * well.  This table is to pick all cases where
 	 * we know that is the case.
 	 */
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
+	{ 0, 0, NULL }
 };
 
 static void cfi_fixup_major_minor(struct cfi_private *cfi,
@@ -455,6 +455,7 @@
 	mtd->flags   = MTD_CAP_NORFLASH;
 	mtd->name    = map->name;
 	mtd->writesize = 1;
+	mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
 
 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
 
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3b8e32d..f072fcf 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -134,7 +134,7 @@
 
 #ifdef AMD_BOOTLOC_BUG
 /* Wheee. Bring me the head of someone at AMD. */
-static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
+static void fixup_amd_bootblock(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -186,7 +186,7 @@
 }
 #endif
 
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -197,7 +197,7 @@
 }
 
 /* Atmel chips don't use the same PRI format as AMD chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -228,14 +228,14 @@
 	cfi->cfiq->BufWriteTimeoutMax = 0;
 }
 
-static void fixup_use_secsi(struct mtd_info *mtd, void *param)
+static void fixup_use_secsi(struct mtd_info *mtd)
 {
 	/* Setup for chips with a secsi area */
 	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
 	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
 }
 
-static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
+static void fixup_use_erase_chip(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -250,7 +250,7 @@
  * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
  * locked by default.
  */
-static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_atmel_lock(struct mtd_info *mtd)
 {
 	mtd->lock = cfi_atmel_lock;
 	mtd->unlock = cfi_atmel_unlock;
@@ -271,7 +271,7 @@
 	cfi->cfiq->NumEraseRegions = 1;
 }
 
-static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@
 	cfi->addr_unlock2 = 0x2AAA;
 }
 
-static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -295,12 +295,12 @@
 	cfi->sector_erase_cmd = CMD(0x50);
 }
 
-static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 
-	fixup_sst39vf_rev_b(mtd, param);
+	fixup_sst39vf_rev_b(mtd);
 
 	/*
 	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
@@ -310,7 +310,7 @@
 	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
 }
 
-static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -321,7 +321,7 @@
 	}
 }
 
-static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -334,47 +334,47 @@
 
 /* Used to fix CFI-Tables of chips without Extended Query Tables */
 static struct cfi_fixup cfi_nopri_fixup_table[] = {
-	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
-	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
-	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
-	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
-	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
-	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
-	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
-	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
+	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
+	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
+	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
+	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
+	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
+	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
+	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
+	{ 0, 0, NULL }
 };
 
 static struct cfi_fixup cfi_fixup_table[] = {
-	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
 #ifdef AMD_BOOTLOC_BUG
-	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
-	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
+	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
 #endif
-	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
-	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
-	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
-	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
-	{ CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
-	{ CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
-	{ CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
-	{ CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
+	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
+	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
+	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
+	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
+	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
+	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
+	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
 #if !FORCE_WORD_WRITE
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
 #endif
-	{ 0, 0, NULL, NULL }
+	{ 0, 0, NULL }
 };
 static struct cfi_fixup jedec_fixup_table[] = {
-	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
+	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
+	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
+	{ 0, 0, NULL }
 };
 
 static struct cfi_fixup fixup_table[] = {
@@ -383,18 +383,30 @@
 	 * well.  This table is to pick all cases where
 	 * we know that is the case.
 	 */
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
-	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
+	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
+	{ 0, 0, NULL }
 };
 
 
 static void cfi_fixup_major_minor(struct cfi_private *cfi,
 				  struct cfi_pri_amdstd *extp)
 {
-	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
-	    extp->MajorVersion == '0')
-		extp->MajorVersion = '1';
+	if (cfi->mfr == CFI_MFR_SAMSUNG) {
+		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
+		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+			/*
+			 * Samsung K8P2815UQB and K8D6x16UxM chips
+			 * report major=0 / minor=0.
+			 * K8D3x16UxC chips report major=3 / minor=3.
+			 */
+			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
+			       " Extended Query version to 1.%c\n",
+			       extp->MinorVersion);
+			extp->MajorVersion = '1';
+		}
+	}
+
 	/*
 	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
 	 */
@@ -428,6 +440,10 @@
 	mtd->flags   = MTD_CAP_NORFLASH;
 	mtd->name    = map->name;
 	mtd->writesize = 1;
+	mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
+		__func__, mtd->writebufsize);
 
 	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
 
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 314af1f..c04b765 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -238,6 +238,7 @@
 	mtd->resume = cfi_staa_resume;
 	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
 	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
+	mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
 	map->fldrv = &cfi_staa_chipdrv;
 	__module_get(THIS_MODULE);
 	mtd->name = map->name;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 360525c..6ae3d11 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -156,7 +156,7 @@
 	for (f=fixups; f->fixup; f++) {
 		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
 		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
-			f->fixup(mtd, f->param);
+			f->fixup(mtd);
 		}
 	}
 }
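
With the void *param argument gone, a fixup is selected purely by matching (manufacturer, id) against the table's wildcards and receives only the mtd it patches. A self-contained sketch of that dispatch, compilable as plain C with the kernel types pared down to a minimum:

#include <stdio.h>

#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY  0xffff

struct mtd_info { const char *name; };

struct cfi_fixup {
	unsigned mfr, id;
	void (*fixup)(struct mtd_info *mtd);	/* the void *param is gone */
};

static void fix_a(struct mtd_info *mtd) { printf("fix_a on %s\n", mtd->name); }

static void run_fixups(struct mtd_info *mtd, unsigned mfr, unsigned id,
		       const struct cfi_fixup *f)
{
	for (; f->fixup; f++)	/* NULL fixup terminates the table */
		if ((f->mfr == CFI_MFR_ANY || f->mfr == mfr) &&
		    (f->id == CFI_ID_ANY || f->id == id))
			f->fixup(mtd);
}

int main(void)
{
	struct mtd_info mtd = { "demo" };
	const struct cfi_fixup table[] = {
		{ 0x0001, CFI_ID_ANY, fix_a },	/* wildcard id */
		{ 0, 0, NULL },
	};

	run_fixups(&mtd, 0x0001, 0x22ab, table);	/* prints "fix_a on demo" */
	return 0;
}
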
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index d180649..5e3cc80 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -98,7 +98,7 @@
 	return ret;
 }
 
-static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_fwh_lock(struct mtd_info *mtd)
 {
 	printk(KERN_NOTICE "using fwh lock/unlock method\n");
 	/* Setup for the chips with the fwh lock method */
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index bf5a002..e4eba6c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -51,6 +51,10 @@
 #define	OPCODE_WRDI		0x04	/* Write disable */
 #define	OPCODE_AAI_WP		0xad	/* Auto address increment word program */
 
+/* Used for Macronix flashes only. */
+#define	OPCODE_EN4B		0xb7	/* Enter 4-byte mode */
+#define	OPCODE_EX4B		0xe9	/* Exit 4-byte mode */
+
 /* Status Register bits. */
 #define	SR_WIP			1	/* Write in progress */
 #define	SR_WEL			2	/* Write enable latch */
@@ -62,7 +66,7 @@
 
 /* Define max times to check status register before we give up. */
 #define	MAX_READY_WAIT_JIFFIES	(40 * HZ)	/* M25P16 specs 40s max chip erase */
-#define	MAX_CMD_SIZE		4
+#define	MAX_CMD_SIZE		5
 
 #ifdef CONFIG_M25PXX_USE_FAST_READ
 #define OPCODE_READ 	OPCODE_FAST_READ
@@ -152,6 +156,16 @@
 }
 
 /*
+ * Enable/disable 4-byte addressing mode.
+ */
+static inline int set_4byte(struct m25p *flash, int enable)
+{
+	u8	code = enable ? OPCODE_EN4B : OPCODE_EX4B;
+
+	return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+}
+
+/*
  * Service routine to read status register until ready, or timeout occurs.
  * Returns non-zero if error.
  */
@@ -207,6 +221,7 @@
 	cmd[1] = addr >> (flash->addr_width * 8 -  8);
 	cmd[2] = addr >> (flash->addr_width * 8 - 16);
 	cmd[3] = addr >> (flash->addr_width * 8 - 24);
+	cmd[4] = addr >> (flash->addr_width * 8 - 32);
 }
 
 static int m25p_cmdsz(struct m25p *flash)
@@ -482,6 +497,10 @@
 	size_t actual;
 	int cmd_sz, ret;
 
+	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+			dev_name(&flash->spi->dev), __func__, "to",
+			(u32)to, len);
+
 	*retlen = 0;
 
 	/* sanity checks */
@@ -607,7 +626,6 @@
 		.sector_size = (_sector_size),				\
 		.n_sectors = (_n_sectors),				\
 		.page_size = 256,					\
-		.addr_width = 3,					\
 		.flags = (_flags),					\
 	})
 
@@ -635,7 +653,7 @@
 	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
 	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
 	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
-	{ "at26df321",  INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
 
 	/* EON -- en25pxx */
 	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024,  64, 0) },
@@ -653,6 +671,8 @@
 	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, 0) },
 	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
 	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
 
 	/* Spansion -- single (large) sector size only, at least
 	 * for the chips listed here (without boot sectors).
@@ -764,6 +784,7 @@
 			return &m25p_ids[tmp];
 		}
 	}
+	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
 	return ERR_PTR(-ENODEV);
 }
 
@@ -883,7 +904,17 @@
 
 	flash->mtd.dev.parent = &spi->dev;
 	flash->page_size = info->page_size;
-	flash->addr_width = info->addr_width;
+
+	if (info->addr_width)
+		flash->addr_width = info->addr_width;
+	else {
+		/* enable 4-byte addressing if the device exceeds 16MiB */
+		if (flash->mtd.size > 0x1000000) {
+			flash->addr_width = 4;
+			set_4byte(flash, 1);
+		} else
+			flash->addr_width = 3;
+	}
 
 	dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
 			(long long)flash->mtd.size >> 10);
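
The MAX_CMD_SIZE bump from 4 to 5 falls out of the command layout: one opcode byte followed by addr_width big-endian address bytes, and addr_width becomes 4 once the part is larger than 16 MiB (the most a 3-byte address can reach). A standalone sketch of the packing, mirroring m25p_addr2cmd:

#include <stdio.h>

#define MAX_CMD_SIZE 5	/* opcode + up to 4 address bytes */

static void addr2cmd(unsigned addr_width, unsigned int addr,
		     unsigned char opcode, unsigned char cmd[MAX_CMD_SIZE])
{
	unsigned i;

	cmd[0] = opcode;
	for (i = 0; i < addr_width; i++)	/* most significant byte first */
		cmd[1 + i] = addr >> ((addr_width - 1 - i) * 8);
}

int main(void)
{
	unsigned char cmd[MAX_CMD_SIZE];
	unsigned i;

	addr2cmd(4, 0x01234567, 0x03 /* READ */, cmd);
	for (i = 0; i < 1 + 4; i++)
		printf("%02x ", cmd[i]);	/* 03 01 23 45 67 */
	printf("\n");
	return 0;
}
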
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 684247a..c163e61 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -335,7 +335,7 @@
 	return ret;
 }
 
-static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
+static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
 {
 	struct flash_info *flash_info = NULL;
 	struct spi_message m;
@@ -375,7 +375,7 @@
 	return flash_info;
 }
 
-static int __init sst25l_probe(struct spi_device *spi)
+static int __devinit sst25l_probe(struct spi_device *spi)
 {
 	struct flash_info *flash_info;
 	struct sst25l_flash *flash;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 19fe92d..77d64ce 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -149,11 +149,8 @@
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
 		printk(KERN_ERR MOD_NAME
-			" %s(): Unable to register resource"
-			" 0x%.16llx-0x%.16llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		       " %s(): Unable to register resource %pR - kernel bug?\n",
+		       __func__, &window->rsrc);
 	}
 
 
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index d175c12..1f30495 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -196,10 +196,15 @@
 	bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
 	if (!bcm963xx_mtd_info) {
 		dev_err(&pdev->dev, "failed to probe using CFI\n");
+		bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
+		if (bcm963xx_mtd_info)
+			goto probe_ok;
+		dev_err(&pdev->dev, "failed to probe using JEDEC\n");
 		err = -EIO;
 		goto err_probe;
 	}
 
+probe_ok:
 	bcm963xx_mtd_info->owner = THIS_MODULE;
 
 	/* This is mutually exclusive */
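
The fallback added above keeps a goto only because later setup failures share the err_probe unwind path; stripped of that, the probing order is simply "try CFI, then JEDEC, else give up". A flattened sketch under that assumption:

/* Flattened probe order; the in-tree code unwinds via err_probe. */
info = do_map_probe("cfi_probe", &bcm963xx_map);
if (!info) {
	dev_err(&pdev->dev, "failed to probe using CFI\n");
	info = do_map_probe("jedec_probe", &bcm963xx_map);
}
if (!info) {
	dev_err(&pdev->dev, "failed to probe using JEDEC\n");
	return -EIO;
}
info->owner = THIS_MODULE;
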
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index ddb462b..5fdb7b2 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -178,11 +178,8 @@
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
 		printk(KERN_ERR MOD_NAME
-			" %s(): Unable to register resource"
-			" 0x%.016llx-0x%.016llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		       " %s(): Unable to register resource %pR - kernel bug?\n",
+			__func__, &window->rsrc);
 	}
 
 
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index d12c93d..4feb750 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -242,12 +242,9 @@
 	window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
-		printk(KERN_DEBUG MOD_NAME
-			": %s(): Unable to register resource"
-			" 0x%.08llx-0x%.08llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		printk(KERN_DEBUG MOD_NAME ": "
+		       "%s(): Unable to register resource %pR - kernel bug?\n",
+			__func__, &window->rsrc);
 	}
 
 	/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index f102bf2..1337a41 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -175,12 +175,9 @@
 	window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
-		printk(KERN_DEBUG MOD_NAME
-			": %s(): Unable to register resource"
-			" 0x%.16llx-0x%.16llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		printk(KERN_DEBUG MOD_NAME ": "
+		       "%s(): Unable to register resource %pR - kernel bug?\n",
+		       __func__, &window->rsrc);
 	}
 
 	/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 9861814..8506578 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -274,9 +274,7 @@
 			continue;
 		}
 
-		dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
-			(unsigned long long)res.start,
-			(unsigned long long)res.end);
+		dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
 
 		err = -EBUSY;
 		res_size = resource_size(&res);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index b5391eb..027e628 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -166,9 +166,8 @@
 		outl(pmr, scx200_cb_base + SCx200_PMR);
 	}
 
-       	printk(KERN_INFO NAME ": DOCCS mapped at 0x%llx-0x%llx, width %d\n",
-			(unsigned long long)docmem.start,
-			(unsigned long long)docmem.end, width);
+	printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
+	       &docmem, width);
 
 	scx200_docflash_map.size = size;
 	if (width == 8)
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 6014698..c08e140 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -139,7 +139,7 @@
 			goto error_mem;
 		}
 
-		map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
+		map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
 
 		if (!map_banks[idx]->name) {
 			ret = -ENOMEM;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index ee4bb33..145b3d0d 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -522,10 +522,6 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	/* Only master mtd device must be used to control partitions */
-	if (!mtd_is_master(mtd))
-		return -EINVAL;
-
 	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
 		return -EFAULT;
 
@@ -535,6 +531,10 @@
 	switch (a.op) {
 	case BLKPG_ADD_PARTITION:
 
+		/* Only master mtd device must be used to add partitions */
+		if (mtd_is_partition(mtd))
+			return -EINVAL;
+
 		return mtd_add_partition(mtd, p.devname, p.start, p.length);
 
 	case BLKPG_DEL_PARTITION:
@@ -601,6 +601,7 @@
 	}
 
 	case MEMGETINFO:
+		memset(&info, 0, sizeof(info));
 		info.type	= mtd->type;
 		info.flags	= mtd->flags;
 		info.size	= mtd->size;
@@ -609,7 +610,6 @@
 		info.oobsize	= mtd->oobsize;
 		/* The below fields are obsolete */
 		info.ecctype	= -1;
-		info.eccsize	= 0;
 		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
 			return -EFAULT;
 		break;
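
Zeroing the whole struct before filling it matters because member-by-member assignment leaves compiler-inserted padding untouched, and copy_to_user() ships the raw bytes, padding included, to userspace. A standalone illustration; the 3-byte padding hole assumed here is typical for this layout on 32- and 64-bit ABIs:

#include <stdio.h>
#include <string.h>

struct info {
	char type;		/* bytes 1-3 are usually padding */
	unsigned int size;	/* aligned to offset 4 */
};

int main(void)
{
	struct info a;

	memset(&a, 0xaa, sizeof(a));	/* stand-in for stale stack data */
	a.type = 1;
	a.size = 42;			/* every member assigned... */
	printf("padding byte: %02x\n", ((unsigned char *)&a)[1]);	/* aa */

	memset(&a, 0, sizeof(a));	/* the fix: zero first */
	a.type = 1;
	a.size = 42;
	printf("padding byte: %02x\n", ((unsigned char *)&a)[1]);	/* 00 */
	return 0;
}
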
@@ -1201,7 +1201,7 @@
 static void __exit cleanup_mtdchar(void)
 {
 	unregister_mtd_user(&mtdchar_notifier);
-	mntput_long(mtd_inode_mnt);
+	mntput(mtd_inode_mnt);
 	unregister_filesystem(&mtd_inodefs_type);
 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index bf8de09..5f5777b 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -776,6 +776,7 @@
 	concat->mtd.size = subdev[0]->size;
 	concat->mtd.erasesize = subdev[0]->erasesize;
 	concat->mtd.writesize = subdev[0]->writesize;
+	concat->mtd.writebufsize = subdev[0]->writebufsize;
 	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
 	concat->mtd.oobsize = subdev[0]->oobsize;
 	concat->mtd.oobavail = subdev[0]->oobavail;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index c948150..e3e40f4 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -401,7 +401,8 @@
 		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
 
 	cxt->mtd = NULL;
-	flush_scheduled_work();
+	flush_work_sync(&cxt->work_erase);
+	flush_work_sync(&cxt->work_write);
 }
 
 
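flush_scheduled_work() drains every item on the shared kernel workqueue, so it can block on, or deadlock against, work the driver never queued; the replacement waits on exactly the two items mtdoops owns. The shape of the fix, assuming the same struct members:

/* Targeted flush: flush_work_sync() also waits when the item is queued
 * but not yet running, so nothing of ours survives the teardown. */
cxt->mtd = NULL;			/* stop new work being scheduled */
flush_work_sync(&cxt->work_erase);
flush_work_sync(&cxt->work_write);
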
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 79e3689..0a47601 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -120,8 +120,25 @@
 		return -EINVAL;
 	if (ops->datbuf && from + ops->len > mtd->size)
 		return -EINVAL;
-	res = part->master->read_oob(part->master, from + part->offset, ops);
 
+	/*
+	 * If OOB is also requested, make sure that we do not read past the end
+	 * of this partition.
+	 */
+	if (ops->oobbuf) {
+		size_t len, pages;
+
+		if (ops->mode == MTD_OOB_AUTO)
+			len = mtd->oobavail;
+		else
+			len = mtd->oobsize;
+		pages = mtd_div_by_ws(mtd->size, mtd);
+		pages -= mtd_div_by_ws(from, mtd);
+		if (ops->ooboffs + ops->ooblen > pages * len)
+			return -EINVAL;
+	}
+
+	res = part->master->read_oob(part->master, from + part->offset, ops);
 	if (unlikely(res)) {
 		if (res == -EUCLEAN)
 			mtd->ecc_stats.corrected++;
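
The new check bounds an OOB read by what the partition can still supply: the number of pages from `from` to the end, times the OOB bytes usable per page (oobavail in MTD_OOB_AUTO mode, the full oobsize otherwise). A standalone walk-through of the arithmetic with assumed geometry:

#include <stdio.h>

int main(void)
{
	unsigned long long part_size = 4ULL << 20;	/* 4 MiB partition */
	unsigned writesize = 2048, oobavail = 40;	/* MTD_OOB_AUTO case */
	unsigned long long from = part_size - 2 * writesize;	/* 2 pages left */
	unsigned ooboffs = 8, ooblen = 80;

	unsigned long long pages = part_size / writesize - from / writesize;
	unsigned long long room  = pages * oobavail;	/* 2 * 40 = 80 bytes */

	printf("room=%llu request=%u -> %s\n", room, ooboffs + ooblen,
	       ooboffs + ooblen > room ? "-EINVAL" : "ok");	/* 88 > 80 */
	return 0;
}
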
@@ -384,6 +401,7 @@
 	slave->mtd.flags = master->flags & ~part->mask_flags;
 	slave->mtd.size = part->size;
 	slave->mtd.writesize = master->writesize;
+	slave->mtd.writebufsize = master->writebufsize;
 	slave->mtd.oobsize = master->oobsize;
 	slave->mtd.oobavail = master->oobavail;
 	slave->mtd.subpage_sft = master->subpage_sft;
@@ -720,19 +738,19 @@
 }
 EXPORT_SYMBOL_GPL(parse_mtd_partitions);
 
-int mtd_is_master(struct mtd_info *mtd)
+int mtd_is_partition(struct mtd_info *mtd)
 {
 	struct mtd_part *part;
-	int nopart = 0;
+	int ispart = 0;
 
 	mutex_lock(&mtd_partitions_mutex);
 	list_for_each_entry(part, &mtd_partitions, list)
 		if (&part->mtd == mtd) {
-			nopart = 1;
+			ispart = 1;
 			break;
 		}
 	mutex_unlock(&mtd_partitions_mutex);
 
-	return nopart;
+	return ispart;
 }
-EXPORT_SYMBOL_GPL(mtd_is_master);
+EXPORT_SYMBOL_GPL(mtd_is_partition);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8229802..c895922 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,6 +96,7 @@
 config MTD_NAND_AMS_DELTA
 	tristate "NAND Flash device on Amstrad E3"
 	depends on MACH_AMS_DELTA
+	default y
 	help
 	  Support for NAND flash on Amstrad E3 (Delta).
 
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2548e10..a067d09 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -4,6 +4,8 @@
  *  Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
  *
  *  Derived from drivers/mtd/toto.c
+ *  Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *  Partially stolen from drivers/mtd/nand/plat_nand.c
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -62,9 +64,10 @@
 static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
 {
 	struct nand_chip *this = mtd->priv;
+	void __iomem *io_base = this->priv;
 
-	omap_writew(0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
-	omap_writew(byte, this->IO_ADDR_W);
+	writew(0, io_base + OMAP_MPUIO_IO_CNTL);
+	writew(byte, this->IO_ADDR_W);
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
 	ndelay(40);
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
@@ -75,11 +78,12 @@
 {
 	u_char res;
 	struct nand_chip *this = mtd->priv;
+	void __iomem *io_base = this->priv;
 
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
 	ndelay(40);
-	omap_writew(~0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
-	res = omap_readw(this->IO_ADDR_R);
+	writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
+	res = readw(this->IO_ADDR_R);
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
 			       AMS_DELTA_LATCH2_NAND_NRE);
 
@@ -151,11 +155,16 @@
 /*
  * Main initialization routine
  */
-static int __init ams_delta_init(void)
+static int __devinit ams_delta_init(struct platform_device *pdev)
 {
 	struct nand_chip *this;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	void __iomem *io_base;
 	int err = 0;
 
+	if (!res)
+		return -ENXIO;
+
 	/* Allocate memory for MTD device structure and private data */
 	ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
 				sizeof(struct nand_chip), GFP_KERNEL);
@@ -177,9 +186,25 @@
 	/* Link the private data with the MTD structure */
 	ams_delta_mtd->priv = this;
 
+	if (!request_mem_region(res->start, resource_size(res),
+			dev_name(&pdev->dev))) {
+		dev_err(&pdev->dev, "request_mem_region failed\n");
+		err = -EBUSY;
+		goto out_free;
+	}
+
+	io_base = ioremap(res->start, resource_size(res));
+	if (io_base == NULL) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -EIO;
+		goto out_release_io;
+	}
+
+	this->priv = io_base;
+
 	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = (OMAP1_MPUIO_BASE + OMAP_MPUIO_INPUT_LATCH);
-	this->IO_ADDR_W = (OMAP1_MPUIO_BASE + OMAP_MPUIO_OUTPUT);
+	this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+	this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
 	this->read_byte = ams_delta_read_byte;
 	this->write_buf = ams_delta_write_buf;
 	this->read_buf = ams_delta_read_buf;
@@ -195,6 +220,8 @@
 	this->chip_delay = 30;
 	this->ecc.mode = NAND_ECC_SOFT;
 
+	platform_set_drvdata(pdev, io_base);
+
 	/* Set chip enabled, but  */
 	ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
 					  AMS_DELTA_LATCH2_NAND_NWE |
@@ -214,25 +241,56 @@
 	goto out;
 
  out_mtd:
+	platform_set_drvdata(pdev, NULL);
+	iounmap(io_base);
+out_release_io:
+	release_mem_region(res->start, resource_size(res));
+out_free:
 	kfree(ams_delta_mtd);
  out:
 	return err;
 }
 
-module_init(ams_delta_init);
-
 /*
  * Clean up routine
  */
-static void __exit ams_delta_cleanup(void)
+static int __devexit ams_delta_cleanup(struct platform_device *pdev)
 {
+	void __iomem *io_base = platform_get_drvdata(pdev);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
 	/* Release resources, unregister device */
 	nand_release(ams_delta_mtd);
 
+	iounmap(io_base);
+	release_mem_region(res->start, resource_size(res));
+
 	/* Free the MTD device structure */
 	kfree(ams_delta_mtd);
+
+	return 0;
 }
-module_exit(ams_delta_cleanup);
+
+static struct platform_driver ams_delta_nand_driver = {
+	.probe		= ams_delta_init,
+	.remove		= __devexit_p(ams_delta_cleanup),
+	.driver		= {
+		.name	= "ams-delta-nand",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init ams_delta_nand_init(void)
+{
+	return platform_driver_register(&ams_delta_nand_driver);
+}
+module_init(ams_delta_nand_init);
+
+static void __exit ams_delta_nand_exit(void)
+{
+	platform_driver_unregister(&ams_delta_nand_driver);
+}
+module_exit(ams_delta_nand_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c141b07..7a13d42 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -388,6 +388,8 @@
 		         "page_addr: 0x%x, column: 0x%x.\n",
 		         page_addr, column);
 
+		elbc_fcm_ctrl->column = column;
+		elbc_fcm_ctrl->oob = 0;
 		elbc_fcm_ctrl->use_mdr = 1;
 
 		fcr = (NAND_CMD_STATUS   << FCR_CMD1_SHIFT) |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 02edfba..205b10b 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/mtd/fsmc.h>
+#include <linux/amba/bus.h>
 #include <mtd/mtd-abi.h>
 
 static struct nand_ecclayout fsmc_ecc1_layout = {
@@ -119,21 +120,36 @@
 	}
 };
 
+
+#ifdef CONFIG_MTD_PARTITIONS
 /*
  * Default partition tables to be used if the partition information not
- * provided through platform data
- */
-#define PARTITION(n, off, sz)	{.name = n, .offset = off, .size = sz}
-
-/*
+ * provided through platform data.
+ *
  * Default partition layout for small page(= 512 bytes) devices
  * Size for "Root file system" is updated in driver based on actual device size
  */
 static struct mtd_partition partition_info_16KB_blk[] = {
-	PARTITION("X-loader", 0, 4 * 0x4000),
-	PARTITION("U-Boot", 0x10000, 20 * 0x4000),
-	PARTITION("Kernel", 0x60000, 256 * 0x4000),
-	PARTITION("Root File System", 0x460000, 0),
+	{
+		.name = "X-loader",
+		.offset = 0,
+		.size = 4*0x4000,
+	},
+	{
+		.name = "U-Boot",
+		.offset = 0x10000,
+		.size = 20*0x4000,
+	},
+	{
+		.name = "Kernel",
+		.offset = 0x60000,
+		.size = 256*0x4000,
+	},
+	{
+		.name = "Root File System",
+		.offset = 0x460000,
+		.size = 0,
+	},
 };
 
 /*
@@ -141,19 +157,37 @@
  * Size for "Root file system" is updated in driver based on actual device size
  */
 static struct mtd_partition partition_info_128KB_blk[] = {
-	PARTITION("X-loader", 0, 4 * 0x20000),
-	PARTITION("U-Boot", 0x80000, 12 * 0x20000),
-	PARTITION("Kernel", 0x200000, 48 * 0x20000),
-	PARTITION("Root File System", 0x800000, 0),
+	{
+		.name = "X-loader",
+		.offset = 0,
+		.size = 4*0x20000,
+	},
+	{
+		.name = "U-Boot",
+		.offset = 0x80000,
+		.size = 12*0x20000,
+	},
+	{
+		.name = "Kernel",
+		.offset = 0x200000,
+		.size = 48*0x20000,
+	},
+	{
+		.name = "Root File System",
+		.offset = 0x800000,
+		.size = 0,
+	},
 };
 
 #ifdef CONFIG_MTD_CMDLINE_PARTS
 const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
+#endif
 
 /**
- * struct fsmc_nand_data - atructure for FSMC NAND device state
+ * struct fsmc_nand_data - structure for FSMC NAND device state
  *
+ * @pid:		Part ID on the AMBA PrimeCell format
  * @mtd:		MTD info for a NAND flash.
  * @nand:		Chip related info for a NAND flash.
  * @partitions:		Partition info for a NAND Flash.
@@ -169,6 +203,7 @@
  * @regs_va:		FSMC regs base address.
  */
 struct fsmc_nand_data {
+	u32			pid;
 	struct mtd_info		mtd;
 	struct nand_chip	nand;
 	struct mtd_partition	*partitions;
@@ -508,7 +543,9 @@
 	struct nand_chip *nand;
 	struct fsmc_regs *regs;
 	struct resource *res;
-	int nr_parts, ret = 0;
+	int ret = 0;
+	u32 pid;
+	int i;
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "platform data is NULL\n");
@@ -598,6 +635,18 @@
 	if (ret)
 		goto err_probe1;
 
+	/*
+	 * This device ID is actually a common AMBA ID as used on the
+	 * AMBA PrimeCell bus. However it is not a PrimeCell.
+	 */
+	for (pid = 0, i = 0; i < 4; i++)
+		pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+	host->pid = pid;
+	dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
+		 "revision %02x, config %02x\n",
+		 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+		 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
 	host->bank = pdata->bank;
 	host->select_chip = pdata->select_bank;
 	regs = host->regs_va;
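
The ID words live in the last 0x20 bytes of the mapped region; four of the registers each contribute the low byte of one readl(), and shifting those bytes into place, least significant first, reconstructs the 32-bit part ID. A standalone sketch with a hypothetical register dump:

#include <stdio.h>

static unsigned assemble_pid(const unsigned char lo_bytes[4])
{
	unsigned pid = 0;
	int i;

	for (i = 0; i < 4; i++)		/* low byte of each ID register */
		pid |= (unsigned)(lo_bytes[i] & 0xff) << (i * 8);
	return pid;
}

int main(void)
{
	const unsigned char regs[4] = { 0x40, 0x04, 0x18, 0x00 };	/* made up */

	printf("pid = %08x\n", assemble_pid(regs));	/* pid = 00180440 */
	return 0;
}
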
@@ -625,7 +674,7 @@
 
 	fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
 
-	if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+	if (AMBA_REV_BITS(host->pid) >= 8) {
 		nand->ecc.read_page = fsmc_read_page_hwecc;
 		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
 		nand->ecc.correct = fsmc_correct_data;
@@ -645,7 +694,7 @@
 		goto err_probe;
 	}
 
-	if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+	if (AMBA_REV_BITS(host->pid) >= 8) {
 		if (host->mtd.writesize == 512) {
 			nand->ecc.layout = &fsmc_ecc4_sp_layout;
 			host->ecc_place = &fsmc_ecc4_sp_place;
@@ -676,11 +725,9 @@
 	 * Check if partition info passed via command line
 	 */
 	host->mtd.name = "nand";
-	nr_parts = parse_mtd_partitions(&host->mtd, part_probes,
+	host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
 			&host->partitions, 0);
-	if (nr_parts > 0) {
-		host->nr_partitions = nr_parts;
-	} else {
+	if (host->nr_partitions <= 0) {
 #endif
 		/*
 		 * Check if partition info passed via command line
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 67343fc..cea38a5 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,58 +251,6 @@
 	return 0;
 }
 
-
-/* Copy paste of nand_read_page_hwecc_oob_first except for different eccpos
- * handling. The ecc area is for 4k chips 72 bytes long and thus does not fit
- * into the eccpos array. */
-static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
-	struct nand_chip *chip, uint8_t *buf, int page)
-{
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	unsigned int ecc_offset = chip->page_shift;
-
-	/* Read the OOB area first */
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		int stat;
-
-		chip->ecc.hwctl(mtd, NAND_ECC_READ);
-		chip->read_buf(mtd, p, eccsize);
-
-		stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL);
-		if (stat < 0)
-			mtd->ecc_stats.failed++;
-		else
-			mtd->ecc_stats.corrected += stat;
-	}
-	return 0;
-}
-
-/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. */
-static void jz_nand_write_page_hwecc(struct mtd_info *mtd,
-	struct nand_chip *chip, const uint8_t *buf)
-{
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	const uint8_t *p = buf;
-	unsigned int ecc_offset = chip->page_shift;
-
-	for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
-		chip->write_buf(mtd, p, eccsize);
-		chip->ecc.calculate(mtd, p, &chip->oob_poi[i]);
-	}
-
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
-
 #ifdef CONFIG_MTD_CMDLINE_PARTS
 static const char *part_probes[] = {"cmdline", NULL};
 #endif
@@ -393,9 +341,6 @@
 	chip->ecc.size		= 512;
 	chip->ecc.bytes		= 9;
 
-	chip->ecc.read_page	= jz_nand_read_page_hwecc_oob_first;
-	chip->ecc.write_page	= jz_nand_write_page_hwecc;
-
 	if (pdata)
 		chip->ecc.layout = pdata->ecc_layout;
 
@@ -489,7 +434,7 @@
 	return 0;
 }
 
-struct platform_driver jz_nand_driver = {
+static struct platform_driver jz_nand_driver = {
 	.probe = jz_nand_probe,
 	.remove = __devexit_p(jz_nand_remove),
 	.driver = {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 214b03a..ef932ba 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1009,7 +1009,7 @@
 	struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
 	struct mxc_nand_host *host;
 	struct resource *res;
-	int err = 0, nr_parts = 0;
+	int err = 0, __maybe_unused nr_parts = 0;
 	struct nand_ecclayout *oob_smallpage, *oob_largepage;
 
 	/* Allocate memory for MTD device structure and private data */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 31bf376..a9c6ce7 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2865,20 +2865,24 @@
 
 	/* check version */
 	val = le16_to_cpu(p->revision);
-	if (val == 1 || val > (1 << 4)) {
-		printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
-								__func__, val);
-		return 0;
-	}
-
-	if (val & (1 << 4))
+	if (val & (1 << 5))
+		chip->onfi_version = 23;
+	else if (val & (1 << 4))
 		chip->onfi_version = 22;
 	else if (val & (1 << 3))
 		chip->onfi_version = 21;
 	else if (val & (1 << 2))
 		chip->onfi_version = 20;
-	else
+	else if (val & (1 << 1))
 		chip->onfi_version = 10;
+	else
+		chip->onfi_version = 0;
+
+	if (!chip->onfi_version) {
+		printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
+								__func__, val);
+		return 0;
+	}
 
 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
 	sanitize_string(p->model, sizeof(p->model));
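
The rewritten check treats the ONFI revision field as what it really is, a bitmask with one bit per supported revision (bit 1 = 1.0, bit 2 = 2.0, bit 3 = 2.1, bit 4 = 2.2, bit 5 = 2.3), picks the highest set bit, and only rejects the parameter page when no known bit is set. A standalone sketch of the decoding:

#include <stdio.h>

static int onfi_version(unsigned val)
{
	if (val & (1 << 5)) return 23;
	if (val & (1 << 4)) return 22;
	if (val & (1 << 3)) return 21;
	if (val & (1 << 2)) return 20;
	if (val & (1 << 1)) return 10;
	return 0;			/* unsupported */
}

int main(void)
{
	printf("%d\n", onfi_version(0x3e));	/* every bit set -> 23 */
	printf("%d\n", onfi_version(0x02));	/* ONFI 1.0 only -> 10 */
	printf("%d\n", onfi_version(0x01));	/* no known bit  -> 0  */
	return 0;
}
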
@@ -2887,7 +2891,7 @@
 	mtd->writesize = le32_to_cpu(p->byte_per_page);
 	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-	chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+	chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
 	busw = 0;
 	if (le16_to_cpu(p->features) & 1)
 		busw = NAND_BUSWIDTH_16;
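
The (uint64_t) cast on blocks_per_lun is an overflow fix: both operands of the multiply are 32-bit, so without widening one of them first the product is computed modulo 2^32 before the 64-bit chipsize ever sees it. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t blocks = 8192, erasesize = 1 << 20;	/* an 8 GiB chip */

	uint64_t wrong = blocks * erasesize;		/* 32-bit multiply wraps to 0 */
	uint64_t right = (uint64_t)blocks * erasesize;	/* widened multiply */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
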
@@ -3157,7 +3161,7 @@
 	printk(KERN_INFO "NAND device: Manufacturer ID:"
 		" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
 		nand_manuf_ids[maf_idx].name,
-	chip->onfi_version ? type->name : chip->onfi_params.model);
+		chip->onfi_version ? chip->onfi_params.model : type->name);
 
 	return type;
 }
@@ -3435,6 +3439,7 @@
 	mtd->resume = nand_resume;
 	mtd->block_isbad = nand_block_isbad;
 	mtd->block_markbad = nand_block_markbad;
+	mtd->writebufsize = mtd->writesize;
 
 	/* propagate ecc.layout to mtd_info */
 	mtd->ecclayout = chip->ecc.layout;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 586b981..6ebd869 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1092,7 +1092,8 @@
 
 /**
  * verify_bbt_descr - verify the bad block description
- * @bd:			the table to verify
+ * @mtd:	MTD device structure
+ * @bd:		the table to verify
  *
  * This functions performs a few sanity checks on the bad block description
  * table.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a6a73aa..a5aa99f 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -210,12 +210,12 @@
 #define STATE_CMD_READ0        0x00000001 /* read data from the beginning of page */
 #define STATE_CMD_READ1        0x00000002 /* read data from the second half of page */
 #define STATE_CMD_READSTART    0x00000003 /* read data second command (large page devices) */
-#define STATE_CMD_PAGEPROG     0x00000004 /* start page programm */
+#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
 #define STATE_CMD_READOOB      0x00000005 /* read OOB area */
 #define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
 #define STATE_CMD_STATUS       0x00000007 /* read status */
 #define STATE_CMD_STATUS_M     0x00000008 /* read multi-plane status (isn't implemented) */
-#define STATE_CMD_SEQIN        0x00000009 /* sequential data imput */
+#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
 #define STATE_CMD_READID       0x0000000A /* read ID */
 #define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
 #define STATE_CMD_RESET        0x0000000C /* reset */
@@ -230,7 +230,7 @@
 #define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
 #define STATE_ADDR_MASK        0x00000070 /* address states mask */
 
-/* Durind data input/output the simulator is in these states */
+/* During data input/output the simulator is in these states */
 #define STATE_DATAIN           0x00000100 /* waiting for data input */
 #define STATE_DATAIN_MASK      0x00000100 /* data input states mask */
 
@@ -248,7 +248,7 @@
 
 /* Simulator's actions bit masks */
 #define ACTION_CPY       0x00100000 /* copy page/OOB to the internal buffer */
-#define ACTION_PRGPAGE   0x00200000 /* programm the internal buffer to flash */
+#define ACTION_PRGPAGE   0x00200000 /* program the internal buffer to flash */
 #define ACTION_SECERASE  0x00300000 /* erase sector */
 #define ACTION_ZEROOFF   0x00400000 /* don't add any offset to address */
 #define ACTION_HALFOFF   0x00500000 /* add to address half of page */
@@ -263,18 +263,18 @@
 #define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
 #define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
 #define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR     0x00000020 /* page number auto inctimentation is possible */
+#define OPT_AUTOINCR     0x00000020 /* page number auto incrementation is possible */
 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
 #define OPT_SMALLPAGE    (OPT_PAGE256  | OPT_PAGE512)  /* 256 and 512-byte page chips */
 
-/* Remove action bits ftom state */
+/* Remove action bits from state */
 #define NS_STATE(x) ((x) & ~ACTION_MASK)
 
 /*
  * Maximum previous states which need to be saved. Currently saving is
- * only needed for page programm operation with preceeded read command
+ * only needed for a page program operation preceded by a read command
  * (which is only valid for 512-byte pages).
  */
 #define NS_MAX_PREVSTATES 1
@@ -380,16 +380,16 @@
 	/* Read OOB */
 	{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
 			STATE_DATAOUT, STATE_READY}},
-	/* Programm page starting from the beginning */
+	/* Program page starting from the beginning */
 	{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
 			STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Programm page starting from the beginning */
+	/* Program page starting from the beginning */
 	{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
 			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Programm page starting from the second half */
+	/* Program page starting from the second half */
 	{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
 			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Programm OOB */
+	/* Program OOB */
 	{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
 			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
 	/* Erase sector */
@@ -470,7 +470,7 @@
 			err = -EINVAL;
 			goto err_close;
 		}
-		ns->pages_written = vmalloc(ns->geom.pgnum);
+		ns->pages_written = vzalloc(ns->geom.pgnum);
 		if (!ns->pages_written) {
 			NS_ERR("alloc_device: unable to allocate pages written array\n");
 			err = -ENOMEM;
@@ -483,7 +483,6 @@
 			goto err_free;
 		}
 		ns->cfile = cfile;
-		memset(ns->pages_written, 0, ns->geom.pgnum);
 		return 0;
 	}
 
@@ -1171,9 +1170,9 @@
  * of supported operations.
  *
  * Operation can be unknown because of the following.
- *   1. New command was accepted and this is the firs call to find the
+ *   1. New command was accepted and this is the first call to find the
  *      correspondent states chain. In this case ns->npstates = 0;
- *   2. There is several operations which begin with the same command(s)
+ *   2. There are several operations which begin with the same command(s)
  *      (for example program from the second half and read from the
  *      second half operations both begin with the READ1 command). In this
  *      case the ns->pstates[] array contains previous states.
@@ -1186,7 +1185,7 @@
  * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
  * zeroed).
  *
- * If there are several maches, the current state is pushed to the
+ * If there are several matches, the current state is pushed to the
  * ns->pstates.
  *
  * The operation can be unknown only while commands are input to the chip.
@@ -1195,10 +1194,10 @@
  * operation is searched using the following pattern:
  *     ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
  *
- * It is supposed that this pattern must either match one operation on
+ * It is supposed that this pattern must either match one operation or
  * none. There can't be ambiguity in that case.
  *
- * If no matches found, the functions does the following:
+ * If no match is found, the function does the following:
  *   1. if there are saved states present, try to ignore them and search
  *      again only using the last command. If nothing was found, switch
  *      to the STATE_READY state.
@@ -1668,7 +1667,7 @@
 
 	case ACTION_PRGPAGE:
 		/*
-		 * Programm page - move internal buffer data to the page.
+		 * Program page - move internal buffer data to the page.
 		 */
 
 		if (ns->lines.wp) {
@@ -1933,7 +1932,7 @@
 		NS_DBG("read_byte: all bytes were read\n");
 
 		/*
-		 * The OPT_AUTOINCR allows to read next conseqitive pages without
+		 * The OPT_AUTOINCR allows reading the next consecutive pages without
 		 * a new read operation cycle.
 		 */
 		if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
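
The two comment blocks above describe how nandsim identifies the current operation: the saved states in ns->pstates[] plus the last accepted command are matched against every known states chain until exactly one candidate survives. A compact sketch of that matching loop, with hypothetical names (struct ns_op and ns_find_operation are illustrative, not the driver's real symbols):

struct ns_op {
	const unsigned int *states;	/* full states chain of the operation */
	int nstates;
};

/*
 * Return the index of the unique operation whose chain begins with the
 * saved states followed by the last accepted state; -1 if nothing
 * matches or if the pattern is still ambiguous (several chains match).
 */
static int ns_find_operation(const struct ns_op *ops, int nops,
			     const unsigned int *pstates, int npstates,
			     unsigned int last_state)
{
	int i, j, found = -1, matches = 0;

	for (i = 0; i < nops; i++) {
		if (ops[i].nstates <= npstates)
			continue;
		for (j = 0; j < npstates; j++)
			if (ops[i].states[j] != pstates[j])
				break;
		if (j == npstates && ops[i].states[j] == last_state) {
			matches++;
			found = i;
		}
	}
	return matches == 1 ? found : -1;
}
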
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 6ddb246..bb277a5 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -107,7 +107,7 @@
 	if (pasemi_nand_mtd)
 		return -ENODEV;
 
-	pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);
+	pr_debug("pasemi_nand at %pR\n", &res);
 
 	/* Allocate memory for MTD device structure and private data */
 	pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
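
A note on the %pR conversion: res.start and res.end are resource_size_t, whose width varies with the configuration, so feeding them to a %llx specifier is only correct when that type happens to be 64 bits wide. The %pR printk extension takes a struct resource pointer and prints the range (and flags) portably. Side by side, as a sketch assuming the usual kernel printk environment:

/* Old form: assumes resource_size_t is 64-bit, else the varargs are wrong */
pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);

/* New form: the type width no longer matters */
pr_debug("pasemi_nand at %pR\n", &res);
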
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 17f8518..ea2c288 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -885,6 +885,7 @@
 	/* set info fields needed by __readid */
 	info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
 	info->reg_ndcr = ndcr;
+	info->cmdset = &default_cmdset;
 
 	if (__readid(info, &id))
 		return -ENODEV;
@@ -915,7 +916,6 @@
 
 	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
 	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-	info->cmdset = &default_cmdset;
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 054a41c..ca270a4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -277,8 +277,9 @@
 	ret = nand_scan_ident(mtd, 1, NULL);
 	if (!ret) {
 		if (mtd->writesize >= 512) {
-			chip->ecc.size = mtd->writesize;
-			chip->ecc.bytes = 3 * (mtd->writesize / 256);
+			/* Hardware ECC: 6 ECC bytes per 512 bytes of data */
+			chip->ecc.size = 512;
+			chip->ecc.bytes = 6;
 		}
 		ret = nand_scan_tail(mtd);
 	}
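
The new values switch the controller to one hardware-ECC step per 512 bytes instead of one step covering the whole page. For a hypothetical 2048-byte page, the per-page ECC budget is unchanged but is now split into independent steps (illustrative arithmetic only):

int writesize = 2048;			/* page size, hypothetical */
int ecc_size  = 512;			/* ECC step size set above */
int ecc_bytes = 6;			/* ECC bytes per step set above */

int steps     = writesize / ecc_size;	/* 4 steps per page */
int ecc_total = steps * ecc_bytes;	/* 24 bytes, same total as the old
					   3 * (2048 / 256), but now
					   correctable per 512-byte step */
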
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d0894ca7..ac31f46 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -35,6 +35,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/mach/flash.h>
 #include <plat/gpmc.h>
@@ -63,8 +64,13 @@
 	int dma_channel;
 	int freq;
 	int (*setup)(void __iomem *base, int freq);
+	struct regulator *regulator;
 };
 
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
 static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
 {
 	struct omap2_onenand *c = data;
@@ -108,8 +114,9 @@
 static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 {
 	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+	struct onenand_chip *this = mtd->priv;
 	unsigned int intr = 0;
-	unsigned int ctrl;
+	unsigned int ctrl, ctrl_mask;
 	unsigned long timeout;
 	u32 syscfg;
 
@@ -180,7 +187,8 @@
 			if (result == 0) {
 				/* Timeout after 20ms */
 				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
-				if (ctrl & ONENAND_CTRL_ONGO) {
+				if (ctrl & ONENAND_CTRL_ONGO &&
+				    !this->ongoing) {
 					/*
 					 * The operation seems to be still going
 					 * so give it some more time.
@@ -269,7 +277,11 @@
 		return -EIO;
 	}
 
-	if (ctrl & 0xFE9F)
+	ctrl_mask = 0xFE9F;
+	if (this->ongoing)
+		ctrl_mask &= ~0x8000;
+
+	if (ctrl & ctrl_mask)
 		wait_warn("unexpected controller status", state, ctrl, intr);
 
 	return 0;
@@ -591,6 +603,30 @@
 	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
 }
 
+static int omap2_onenand_enable(struct mtd_info *mtd)
+{
+	int ret;
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+	ret = regulator_enable(c->regulator);
+	if (ret != 0)
+		dev_err(&c->pdev->dev, "can't enable regulator\n");
+
+	return ret;
+}
+
+static int omap2_onenand_disable(struct mtd_info *mtd)
+{
+	int ret;
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+	ret = regulator_disable(c->regulator);
+	if (ret != 0)
+		dev_err(&c->pdev->dev, "can't disable regulator\n");
+
+	return ret;
+}
+
 static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 {
 	struct omap_onenand_platform_data *pdata;
@@ -705,8 +741,18 @@
 		}
 	}
 
+	if (pdata->regulator_can_sleep) {
+		c->regulator = regulator_get(&pdev->dev, "vonenand");
+		if (IS_ERR(c->regulator)) {
+			dev_err(&pdev->dev, "Failed to get regulator\n");
+			goto err_release_dma;
+		}
+		c->onenand.enable = omap2_onenand_enable;
+		c->onenand.disable = omap2_onenand_disable;
+	}
+
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
-		goto err_release_dma;
+		goto err_release_regulator;
 
 	switch ((c->onenand.version_id >> 4) & 0xf) {
 	case 0:
@@ -727,13 +773,15 @@
 	}
 
 #ifdef CONFIG_MTD_PARTITIONS
-	if (pdata->parts != NULL)
-		r = add_mtd_partitions(&c->mtd, pdata->parts,
-				       pdata->nr_parts);
+	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
+	if (r > 0)
+		r = add_mtd_partitions(&c->mtd, c->parts, r);
+	else if (pdata->parts != NULL)
+		r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
 	else
 #endif
 		r = add_mtd_device(&c->mtd);
-	if (r < 0)
+	if (r)
 		goto err_release_onenand;
 
 	platform_set_drvdata(pdev, c);
@@ -742,6 +790,8 @@
 
 err_release_onenand:
 	onenand_release(&c->mtd);
+err_release_regulator:
+	regulator_put(c->regulator);
 err_release_dma:
 	if (c->dma_channel != -1)
 		omap_free_dma(c->dma_channel);
@@ -757,6 +807,7 @@
 err_free_cs:
 	gpmc_cs_free(c->gpmc_cs);
 err_kfree:
+	kfree(c->parts);
 	kfree(c);
 
 	return r;
@@ -766,18 +817,8 @@
 {
 	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
 
-	BUG_ON(c == NULL);
-
-#ifdef CONFIG_MTD_PARTITIONS
-	if (c->parts)
-		del_mtd_partitions(&c->mtd);
-	else
-		del_mtd_device(&c->mtd);
-#else
-	del_mtd_device(&c->mtd);
-#endif
-
 	onenand_release(&c->mtd);
+	regulator_put(c->regulator);
 	if (c->dma_channel != -1)
 		omap_free_dma(c->dma_channel);
 	omap2_onenand_shutdown(pdev);
@@ -789,6 +830,7 @@
 	iounmap(c->onenand.base);
 	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
 	gpmc_cs_free(c->gpmc_cs);
+	kfree(c->parts);
 	kfree(c);
 
 	return 0;
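
The enable/disable helpers above follow the standard regulator consumer lifecycle; condensed into one sketch (error handling elided, "vonenand" as in the probe code above):

struct regulator *reg;

reg = regulator_get(dev, "vonenand");	/* probe: look up the supply */
if (IS_ERR(reg))
	return PTR_ERR(reg);

regulator_enable(reg);			/* before the chip is accessed */
/* ... chip I/O ... */
regulator_disable(reg);			/* once the chip is idle again */

regulator_put(reg);			/* remove/error path: drop the reference */
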
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 6b3a875..bac41ca 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -400,8 +400,7 @@
 		value = onenand_bufferram_address(this, block);
 		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
 
-		if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
-		    ONENAND_IS_4KB_PAGE(this))
+		if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
 			/* It is always BufferRAM0 */
 			ONENAND_SET_BUFFERRAM0(this);
 		else
@@ -430,7 +429,7 @@
 		case FLEXONENAND_CMD_RECOVER_LSB:
 		case ONENAND_CMD_READ:
 		case ONENAND_CMD_READOOB:
-			if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+			if (ONENAND_IS_4KB_PAGE(this))
 				/* It is always BufferRAM0 */
 				dataram = ONENAND_SET_BUFFERRAM0(this);
 			else
@@ -949,6 +948,8 @@
 		if (this->state == FL_READY) {
 			this->state = new_state;
 			spin_unlock(&this->chip_lock);
+			if (new_state != FL_PM_SUSPENDED && this->enable)
+				this->enable(mtd);
 			break;
 		}
 		if (new_state == FL_PM_SUSPENDED) {
@@ -975,6 +976,8 @@
 {
 	struct onenand_chip *this = mtd->priv;
 
+	if (this->state != FL_PM_SUSPENDED && this->disable)
+		this->disable(mtd);
 	/* Release the chip */
 	spin_lock(&this->chip_lock);
 	this->state = FL_READY;
@@ -1353,7 +1356,7 @@
 
 	stats = mtd->ecc_stats;
 
-	readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+	readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
 	while (read < len) {
 		cond_resched();
@@ -1429,7 +1432,7 @@
 	int ret;
 
 	onenand_get_device(mtd, FL_READING);
-	ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+	ret = ONENAND_IS_4KB_PAGE(this) ?
 		onenand_mlc_read_ops_nolock(mtd, from, &ops) :
 		onenand_read_ops_nolock(mtd, from, &ops);
 	onenand_release_device(mtd);
@@ -1464,7 +1467,7 @@
 
 	onenand_get_device(mtd, FL_READING);
 	if (ops->datbuf)
-		ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+		ret = ONENAND_IS_4KB_PAGE(this) ?
 			onenand_mlc_read_ops_nolock(mtd, from, ops) :
 			onenand_read_ops_nolock(mtd, from, ops);
 	else
@@ -1485,8 +1488,7 @@
 {
 	struct onenand_chip *this = mtd->priv;
 	unsigned long timeout;
-	unsigned int interrupt;
-	unsigned int ctrl;
+	unsigned int interrupt, ctrl, ecc, addr1, addr8;
 
 	/* The 20 msec is enough */
 	timeout = jiffies + msecs_to_jiffies(20);
@@ -1498,25 +1500,28 @@
 	/* To get correct interrupt status in timeout case */
 	interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
 	ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+	addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
+	addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
 
 	if (interrupt & ONENAND_INT_READ) {
-		int ecc = onenand_read_ecc(this);
+		ecc = onenand_read_ecc(this);
 		if (ecc & ONENAND_ECC_2BIT_ALL) {
-			printk(KERN_WARNING "%s: ecc error = 0x%04x, "
-				"controller error 0x%04x\n",
-				__func__, ecc, ctrl);
+			printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
+			       "intr 0x%04x addr1 %#x addr8 %#x\n",
+			       __func__, ecc, ctrl, interrupt, addr1, addr8);
 			return ONENAND_BBT_READ_ECC_ERROR;
 		}
 	} else {
-		printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
-			__func__, ctrl, interrupt);
+		printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
+		       "intr 0x%04x addr1 %#x addr8 %#x\n",
+		       __func__, ctrl, interrupt, addr1, addr8);
 		return ONENAND_BBT_READ_FATAL_ERROR;
 	}
 
 	/* Initial bad block case: 0x2400 or 0x0400 */
 	if (ctrl & ONENAND_CTRL_ERROR) {
-		printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
-			__func__, ctrl);
+		printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
+		       "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
 		return ONENAND_BBT_READ_ERROR;
 	}
 
@@ -1558,7 +1563,7 @@
 
 	column = from & (mtd->oobsize - 1);
 
-	readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+	readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
 	while (read < len) {
 		cond_resched();
@@ -1612,7 +1617,7 @@
 	u_char *oob_buf = this->oob_buf;
 	int status, i, readcmd;
 
-	readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+	readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
 	this->command(mtd, readcmd, to, mtd->oobsize);
 	onenand_update_bufferram(mtd, to, 0);
@@ -1845,7 +1850,7 @@
 	const u_char *buf = ops->datbuf;
 	const u_char *oob = ops->oobbuf;
 	u_char *oobbuf;
-	int ret = 0;
+	int ret = 0, cmd;
 
 	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
 		__func__, (unsigned int) to, (int) len);
@@ -1954,7 +1959,19 @@
 			ONENAND_SET_NEXT_BUFFERRAM(this);
 		}
 
-		this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+		this->ongoing = 0;
+		cmd = ONENAND_CMD_PROG;
+
+		/* Exclude 1st OTP and OTP blocks for cache program feature */
+		if (ONENAND_IS_CACHE_PROGRAM(this) &&
+		    likely(onenand_block(this, to) != 0) &&
+		    ONENAND_IS_4KB_PAGE(this) &&
+		    ((written + thislen) < len)) {
+			cmd = ONENAND_CMD_2X_CACHE_PROG;
+			this->ongoing = 1;
+		}
+
+		this->command(mtd, cmd, to, mtd->writesize);
 
 		/*
 		 * 2 PLANE, MLC, and Flex-OneNAND wait here
@@ -2067,7 +2084,7 @@
 
 	oobbuf = this->oob_buf;
 
-	oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+	oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
 
 	/* Loop until all data write */
 	while (written < len) {
@@ -2086,7 +2103,7 @@
 			memcpy(oobbuf + column, buf, thislen);
 		this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
 
-		if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
+		if (ONENAND_IS_4KB_PAGE(this)) {
 			/* Set main area of DataRAM to 0xff */
 			memset(this->page_buf, 0xff, mtd->writesize);
 			this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -2481,7 +2498,8 @@
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_ERASING);
 
-	if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+	if (ONENAND_IS_4KB_PAGE(this) || region ||
+	    instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
 		/* region is set for Flex-OneNAND (no mb erase) */
 		ret = onenand_block_by_block_erase(mtd, instr,
 						   region, block_size);
@@ -3029,7 +3047,7 @@
 	this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
 	this->wait(mtd, FL_OTPING);
 
-	ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+	ret = ONENAND_IS_4KB_PAGE(this) ?
 		onenand_mlc_read_ops_nolock(mtd, from, &ops) :
 		onenand_read_ops_nolock(mtd, from, &ops);
 
@@ -3377,8 +3395,10 @@
 	case ONENAND_DEVICE_DENSITY_4Gb:
 		if (ONENAND_IS_DDP(this))
 			this->options |= ONENAND_HAS_2PLANE;
-		else if (numbufs == 1)
+		else if (numbufs == 1) {
 			this->options |= ONENAND_HAS_4KB_PAGE;
+			this->options |= ONENAND_HAS_CACHE_PROGRAM;
+		}
 
 	case ONENAND_DEVICE_DENSITY_2Gb:
 		/* 2Gb DDP does not have 2 plane */
@@ -3399,7 +3419,11 @@
 		break;
 	}
 
-	if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+	/* The MLC has 4KiB pagesize. */
+	if (ONENAND_IS_MLC(this))
+		this->options |= ONENAND_HAS_4KB_PAGE;
+
+	if (ONENAND_IS_4KB_PAGE(this))
 		this->options &= ~ONENAND_HAS_2PLANE;
 
 	if (FLEXONENAND(this)) {
@@ -3415,6 +3439,8 @@
 		printk(KERN_DEBUG "Chip has 2 plane\n");
 	if (this->options & ONENAND_HAS_4KB_PAGE)
 		printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
+	if (this->options & ONENAND_HAS_CACHE_PROGRAM)
+		printk(KERN_DEBUG "Chip has cache program feature\n");
 }
 
 /**
@@ -3831,7 +3857,7 @@
 	/* The data buffer size is equal to page size */
 	mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
 	/* We use the full BufferRAM */
-	if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+	if (ONENAND_IS_4KB_PAGE(this))
 		mtd->writesize <<= 1;
 
 	mtd->oobsize = mtd->writesize >> 5;
@@ -4054,6 +4080,7 @@
 	mtd->block_isbad = onenand_block_isbad;
 	mtd->block_markbad = onenand_block_markbad;
 	mtd->owner = THIS_MODULE;
+	mtd->writebufsize = mtd->writesize;
 
 	/* Unlock whole block */
 	this->unlock_all(mtd);
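
Taken together, the onenand_get_device()/onenand_release_device() hunks above bracket every chip access with the new power hooks, skipping them around PM suspend where power is managed separately. The control flow, condensed (a sketch, not verbatim driver code):

/* acquire: power the chip up, except when entering PM suspend */
if (new_state != FL_PM_SUSPENDED && this->enable)
	this->enable(mtd);

/* ... issue commands, transfer data ... */

/* release: power the chip down, except when resuming from PM suspend */
if (this->state != FL_PM_SUSPENDED && this->disable)
	this->disable(mtd);
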
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 01ab5b3..fc2c16a 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -91,16 +91,18 @@
 		for (j = 0; j < len; j++) {
 			/* No need to read pages fully,
 			 * just read required OOB bytes */
-			ret = onenand_bbt_read_oob(mtd, from + j * mtd->writesize + bd->offs, &ops);
+			ret = onenand_bbt_read_oob(mtd,
+				from + j * this->writesize + bd->offs, &ops);
 
 			/* If it is an initial bad block, just ignore it */
 			if (ret == ONENAND_BBT_READ_FATAL_ERROR)
 				return -EIO;
 
-			if (ret || check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
+			if (ret || check_short_pattern(&buf[j * scanlen],
+					       scanlen, this->writesize, bd)) {
 				bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
-				printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
-					i >> 1, (unsigned int) from);
+				printk(KERN_INFO "OneNAND eraseblock %d is an "
+					"initial bad block\n", i >> 1);
 				mtd->ecc_stats.badblocks++;
 				break;
 			}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 0de7a05..a4c74a9 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -651,7 +651,7 @@
 	void __iomem *p;
 	void *buf = (void *) buffer;
 	dma_addr_t dma_src, dma_dst;
-	int err, page_dma = 0;
+	int err, ofs, page_dma = 0;
 	struct device *dev = &onenand->pdev->dev;
 
 	p = this->base + area;
@@ -677,10 +677,13 @@
 		if (!page)
 			goto normal;
 
+		/* Page offset */
+		ofs = ((size_t) buf & ~PAGE_MASK);
 		page_dma = 1;
+
 		/* DMA routine */
 		dma_src = onenand->phys_base + (p - this->base);
-		dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
+		dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
 	} else {
 		/* DMA routine */
 		dma_src = onenand->phys_base + (p - this->base);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 5ebe280..f49e49d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -672,7 +672,33 @@
 		ubi->nor_flash = 1;
 	}
 
-	ubi->min_io_size = ubi->mtd->writesize;
+	/*
+	 * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
+	 * for these purposes, not @mtd->writesize. At the moment this does not
+	 * matter for NAND, because currently @mtd->writebufsize is equivalent to
+	 * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
+	 * have @mtd->writebufsize which is a multiple of @mtd->writesize.
+	 *
+	 * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
+	 * UBI and UBIFS recovery algorithms rely on the fact that if there was
+	 * an unclean power cut, then we can find offset of the last corrupted
+	 * node, align the offset to @ubi->min_io_size, read the rest of the
+	 * eraseblock starting from this offset, and check whether there are
+	 * only 0xFF bytes. If yes, then we are probably dealing with a
+	 * corruption caused by a power cut, if not, then this is probably some
+	 * severe corruption.
+	 *
+	 * Thus, we have to use the maximum write unit size of the flash, which
+	 * is @mtd->writebufsize, because @mtd->writesize is the minimum write
+	 * size, not the maximum.
+	 */
+	if (ubi->mtd->type == MTD_NANDFLASH)
+		ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
+	else if (ubi->mtd->type == MTD_NORFLASH)
+		ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
+
+	ubi->min_io_size = ubi->mtd->writebufsize;
+
 	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
 
 	/*
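
A worked example of the recovery rule described in the comment: assume, hypothetically, a CFI NOR part with a 1-byte minimum write unit and a 64-byte write buffer. min_io_size then follows the buffer size, and the post-power-cut scan aligns to it before checking for 0xFF bytes (ALIGN as in the kernel's kernel.h; corrupt_offs is an assumed input):

int writesize    = 1;			/* minimum programmable unit */
int writebufsize = 64;			/* largest unit one program op may touch */
int min_io_size  = writebufsize;	/* what UBI now uses */

/* Align the offset of the last (possibly torn) node up to the largest
 * write unit, then check the rest of the eraseblock for 0xFF bytes. */
int scan_from = ALIGN(corrupt_offs, min_io_size);
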
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fcdb7f6..0b8141f 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -425,12 +425,11 @@
 
 	/* Read both LEB 0 and LEB 1 into memory */
 	ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-		leb[seb->lnum] = vmalloc(ubi->vtbl_size);
+		leb[seb->lnum] = vzalloc(ubi->vtbl_size);
 		if (!leb[seb->lnum]) {
 			err = -ENOMEM;
 			goto out_free;
 		}
-		memset(leb[seb->lnum], 0, ubi->vtbl_size);
 
 		err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
 				       ubi->vtbl_size);
@@ -516,10 +515,9 @@
 	int i;
 	struct ubi_vtbl_record *vtbl;
 
-	vtbl = vmalloc(ubi->vtbl_size);
+	vtbl = vzalloc(ubi->vtbl_size);
 	if (!vtbl)
 		return ERR_PTR(-ENOMEM);
-	memset(vtbl, 0, ubi->vtbl_size);
 
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
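
vzalloc(), used in both conversions above, is the zeroing variant of vmalloc(); the change simply folds the follow-up memset() into the allocation. Open-coded equivalent:

/* vzalloc(size) behaves like: */
void *p = vmalloc(size);
if (p)
	memset(p, 0, size);
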
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4c8bfc9..16fe4f9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3389,8 +3389,7 @@
 
 config NETCONSOLE_DYNAMIC
 	bool "Dynamic reconfiguration of logging targets"
-	depends on NETCONSOLE && SYSFS
-	select CONFIGFS_FS
+	depends on NETCONSOLE && SYSFS && CONFIGFS_FS
 	help
 	  This option enables the ability to dynamically reconfigure target
 	  parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d84..62d6f88 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@
 }
 
 /**
- *	ks8695_get_settings - Get device-specific settings.
+ *	ks8695_wan_get_settings - Get device-specific settings.
  *	@ndev: The network device to read settings from
  *	@cmd: The ethtool structure to read into
  */
 static int
-ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
@@ -870,69 +870,50 @@
 			  SUPPORTED_TP | SUPPORTED_MII);
 	cmd->transceiver = XCVR_INTERNAL;
 
-	/* Port specific extras */
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		cmd->phy_address = 0;
-		/* not supported for HPNA */
+	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+	cmd->port = PORT_MII;
+	cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+	cmd->phy_address = 0;
+
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+	if ((ctrl & WMC_WAND) == 0) {
+		/* auto-negotiation is enabled */
+		cmd->advertising |= ADVERTISED_Autoneg;
+		if (ctrl & WMC_WANA100F)
+			cmd->advertising |= ADVERTISED_100baseT_Full;
+		if (ctrl & WMC_WANA100H)
+			cmd->advertising |= ADVERTISED_100baseT_Half;
+		if (ctrl & WMC_WANA10F)
+			cmd->advertising |= ADVERTISED_10baseT_Full;
+		if (ctrl & WMC_WANA10H)
+			cmd->advertising |= ADVERTISED_10baseT_Half;
+		if (ctrl & WMC_WANAP)
+			cmd->advertising |= ADVERTISED_Pause;
+		cmd->autoneg = AUTONEG_ENABLE;
+
+		cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+		cmd->duplex = (ctrl & WMC_WDS) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+	} else {
+		/* auto-negotiation is disabled */
 		cmd->autoneg = AUTONEG_DISABLE;
 
-		/* BUG: Erm, dtype hpna implies no phy regs */
-		/*
-		ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
-		cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
-		cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
-		*/
-		return -EOPNOTSUPP;
-	case KS8695_DTYPE_WAN:
-		cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
-		cmd->port = PORT_MII;
-		cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
-		cmd->phy_address = 0;
-
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-		if ((ctrl & WMC_WAND) == 0) {
-			/* auto-negotiation is enabled */
-			cmd->advertising |= ADVERTISED_Autoneg;
-			if (ctrl & WMC_WANA100F)
-				cmd->advertising |= ADVERTISED_100baseT_Full;
-			if (ctrl & WMC_WANA100H)
-				cmd->advertising |= ADVERTISED_100baseT_Half;
-			if (ctrl & WMC_WANA10F)
-				cmd->advertising |= ADVERTISED_10baseT_Full;
-			if (ctrl & WMC_WANA10H)
-				cmd->advertising |= ADVERTISED_10baseT_Half;
-			if (ctrl & WMC_WANAP)
-				cmd->advertising |= ADVERTISED_Pause;
-			cmd->autoneg = AUTONEG_ENABLE;
-
-			cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
-			cmd->duplex = (ctrl & WMC_WDS) ?
-				DUPLEX_FULL : DUPLEX_HALF;
-		} else {
-			/* auto-negotiation is disabled */
-			cmd->autoneg = AUTONEG_DISABLE;
-
-			cmd->speed = (ctrl & WMC_WANF100) ?
-				SPEED_100 : SPEED_10;
-			cmd->duplex = (ctrl & WMC_WANFF) ?
-				DUPLEX_FULL : DUPLEX_HALF;
-		}
-		break;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
+		cmd->speed = (ctrl & WMC_WANF100) ?
+			SPEED_100 : SPEED_10;
+		cmd->duplex = (ctrl & WMC_WANFF) ?
+			DUPLEX_FULL : DUPLEX_HALF;
 	}
 
 	return 0;
 }
 
 /**
- *	ks8695_set_settings - Set device-specific settings.
+ *	ks8695_wan_set_settings - Set device-specific settings.
  *	@ndev: The network device to configure
  *	@cmd: The settings to configure
  */
 static int
-ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
@@ -956,171 +937,85 @@
 				ADVERTISED_100baseT_Full)) == 0)
 			return -EINVAL;
 
-		switch (ksp->dtype) {
-		case KS8695_DTYPE_HPNA:
-			/* HPNA does not support auto-negotiation. */
-			return -EINVAL;
-		case KS8695_DTYPE_WAN:
-			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-			ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
-				  WMC_WANA10F | WMC_WANA10H);
-			if (cmd->advertising & ADVERTISED_100baseT_Full)
-				ctrl |= WMC_WANA100F;
-			if (cmd->advertising & ADVERTISED_100baseT_Half)
-				ctrl |= WMC_WANA100H;
-			if (cmd->advertising & ADVERTISED_10baseT_Full)
-				ctrl |= WMC_WANA10F;
-			if (cmd->advertising & ADVERTISED_10baseT_Half)
-				ctrl |= WMC_WANA10H;
+		ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+			  WMC_WANA10F | WMC_WANA10H);
+		if (cmd->advertising & ADVERTISED_100baseT_Full)
+			ctrl |= WMC_WANA100F;
+		if (cmd->advertising & ADVERTISED_100baseT_Half)
+			ctrl |= WMC_WANA100H;
+		if (cmd->advertising & ADVERTISED_10baseT_Full)
+			ctrl |= WMC_WANA10F;
+		if (cmd->advertising & ADVERTISED_10baseT_Half)
+			ctrl |= WMC_WANA10H;
 
-			/* force a re-negotiation */
-			ctrl |= WMC_WANR;
-			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
-			break;
-		case KS8695_DTYPE_LAN:
-			return -EOPNOTSUPP;
-		}
-
+		/* force a re-negotiation */
+		ctrl |= WMC_WANR;
+		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
 	} else {
-		switch (ksp->dtype) {
-		case KS8695_DTYPE_HPNA:
-			/* BUG: dtype_hpna implies no phy registers */
-			/*
-			ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
+		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-			ctrl &= ~(HMC_HSS | HMC_HDS);
-			if (cmd->speed == SPEED_100)
-				ctrl |= HMC_HSS;
-			if (cmd->duplex == DUPLEX_FULL)
-				ctrl |= HMC_HDS;
+		/* disable auto-negotiation */
+		ctrl |= WMC_WAND;
+		ctrl &= ~(WMC_WANF100 | WMC_WANFF);
 
-			__raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
-			*/
-			return -EOPNOTSUPP;
-		case KS8695_DTYPE_WAN:
-			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+		if (cmd->speed == SPEED_100)
+			ctrl |= WMC_WANF100;
+		if (cmd->duplex == DUPLEX_FULL)
+			ctrl |= WMC_WANFF;
 
-			/* disable auto-negotiation */
-			ctrl |= WMC_WAND;
-			ctrl &= ~(WMC_WANF100 | WMC_WANFF);
-
-			if (cmd->speed == SPEED_100)
-				ctrl |= WMC_WANF100;
-			if (cmd->duplex == DUPLEX_FULL)
-				ctrl |= WMC_WANFF;
-
-			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
-			break;
-		case KS8695_DTYPE_LAN:
-			return -EOPNOTSUPP;
-		}
+		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
 	}
 
 	return 0;
 }
 
 /**
- *	ks8695_nwayreset - Restart the autonegotiation on the port.
+ *	ks8695_wan_nwayreset - Restart the autonegotiation on the port.
  *	@ndev: The network device to restart autonegotiation on
  */
 static int
-ks8695_nwayreset(struct net_device *ndev)
+ks8695_wan_nwayreset(struct net_device *ndev)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
 
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* No phy means no autonegotiation on hpna */
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+	if ((ctrl & WMC_WAND) == 0)
+		writel(ctrl | WMC_WANR,
+		       ksp->phyiface_regs + KS8695_WMC);
+	else
+		/* auto-negotiation not enabled */
 		return -EINVAL;
-	case KS8695_DTYPE_WAN:
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
-		if ((ctrl & WMC_WAND) == 0)
-			writel(ctrl | WMC_WANR,
-			       ksp->phyiface_regs + KS8695_WMC);
-		else
-			/* auto-negotiation not enabled */
-			return -EINVAL;
-		break;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
-	}
 
 	return 0;
 }
 
 /**
- *	ks8695_get_link - Retrieve link status of network interface
- *	@ndev: The network interface to retrive the link status of.
- */
-static u32
-ks8695_get_link(struct net_device *ndev)
-{
-	struct ks8695_priv *ksp = netdev_priv(ndev);
-	u32 ctrl;
-
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* HPNA always has link */
-		return 1;
-	case KS8695_DTYPE_WAN:
-		/* WAN we can read the PHY for */
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-		return ctrl & WMC_WLS;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
-	}
-	return 0;
-}
-
-/**
- *	ks8695_get_pause - Retrieve network pause/flow-control advertising
+ *	ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
  *	@ndev: The device to retrieve settings from
  *	@param: The structure to fill out with the information
  */
 static void
-ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
 
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* No phy link on hpna to configure */
-		return;
-	case KS8695_DTYPE_WAN:
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-		/* advertise Pause */
-		param->autoneg = (ctrl & WMC_WANAP);
+	/* advertise Pause */
+	param->autoneg = (ctrl & WMC_WANAP);
 
-		/* current Rx Flow-control */
-		ctrl = ks8695_readreg(ksp, KS8695_DRXC);
-		param->rx_pause = (ctrl & DRXC_RFCE);
+	/* current Rx Flow-control */
+	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+	param->rx_pause = (ctrl & DRXC_RFCE);
 
-		/* current Tx Flow-control */
-		ctrl = ks8695_readreg(ksp, KS8695_DTXC);
-		param->tx_pause = (ctrl & DTXC_TFCE);
-		break;
-	case KS8695_DTYPE_LAN:
-		/* The LAN's "phy" is a direct-attached switch */
-		return;
-	}
-}
-
-/**
- *	ks8695_set_pause - Configure pause/flow-control
- *	@ndev: The device to configure
- *	@param: The pause parameters to set
- *
- *	TODO: Implement this
- */
-static int
-ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
-{
-	return -EOPNOTSUPP;
+	/* current Tx Flow-control */
+	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+	param->tx_pause = (ctrl & DTXC_TFCE);
 }
 
 /**
@@ -1140,12 +1035,17 @@
 static const struct ethtool_ops ks8695_ethtool_ops = {
 	.get_msglevel	= ks8695_get_msglevel,
 	.set_msglevel	= ks8695_set_msglevel,
-	.get_settings	= ks8695_get_settings,
-	.set_settings	= ks8695_set_settings,
-	.nway_reset	= ks8695_nwayreset,
-	.get_link	= ks8695_get_link,
-	.get_pauseparam = ks8695_get_pause,
-	.set_pauseparam = ks8695_set_pause,
+	.get_drvinfo	= ks8695_get_drvinfo,
+};
+
+static const struct ethtool_ops ks8695_wan_ethtool_ops = {
+	.get_msglevel	= ks8695_get_msglevel,
+	.set_msglevel	= ks8695_set_msglevel,
+	.get_settings	= ks8695_wan_get_settings,
+	.set_settings	= ks8695_wan_set_settings,
+	.nway_reset	= ks8695_wan_nwayreset,
+	.get_link	= ethtool_op_get_link,
+	.get_pauseparam = ks8695_wan_get_pause,
 	.get_drvinfo	= ks8695_get_drvinfo,
 };
 
@@ -1541,7 +1441,6 @@
 
 	/* driver system setup */
 	ndev->netdev_ops = &ks8695_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	ndev->watchdog_timeo	 = msecs_to_jiffies(watchdog);
 
 	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@
 	if (ksp->phyiface_regs && ksp->link_irq == -1) {
 		ks8695_init_switch(ksp);
 		ksp->dtype = KS8695_DTYPE_LAN;
+		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
 		ks8695_init_wan_phy(ksp);
 		ksp->dtype = KS8695_DTYPE_WAN;
+		SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
 	} else {
 		/* No initialisation since HPNA does not have a PHY */
 		ksp->dtype = KS8695_DTYPE_HPNA;
+		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	}
 
 	/* And bring up the net_device with the net core */
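
The refactor replaces one ethtool_ops table full of switch (ksp->dtype) statements with per-port tables selected once at probe time; only the WAN port, which has a real PHY behind phyiface_regs, keeps the settings/nway/pause callbacks, and its get_link becomes the generic ethtool_op_get_link(). The selection reduces to (a sketch mirroring the probe hunk above):

if (ksp->dtype == KS8695_DTYPE_WAN)
	SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);	/* full PHY control */
else
	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);	/* msglevel/drvinfo only */
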
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b9fc51..22abfb3 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1284,19 +1284,12 @@
 {
 	u32 emac_hashhi, emac_hashlo;
 	struct netdev_hw_addr *ha;
-	char *addrs;
 	u32 crc;
 
 	emac_hashhi = emac_hashlo = 0;
 
 	netdev_for_each_mc_addr(ha, dev) {
-		addrs = ha->addr;
-
-		/* skip non-multicast addresses */
-		if (!(*addrs & 1))
-			continue;
-
-		crc = ether_crc(ETH_ALEN, addrs);
+		crc = ether_crc(ETH_ALEN, ha->addr);
 		crc >>= 26;
 
 		if (crc & 0x20)
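
Two things happen in this hunk: the hand-rolled skip of unicast entries disappears because netdev_for_each_mc_addr() only ever visits multicast addresses, and the hash computation itself is untouched. For reference, the full 64-bit hash-filter update this feeds into (a sketch consistent with the surrounding code):

u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;	/* top 6 CRC bits: 0..63 */

if (crc & 0x20)					/* bit 5 selects the register */
	emac_hashhi |= 1 << (crc & 0x1f);	/* bits 0..4 select the bit */
else
	emac_hashlo |= 1 << (crc & 0x1f);
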
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae..142d604 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@
 
 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
 	if (ioc_attr) {
-		memset(ioc_attr, 0, sizeof(*ioc_attr));
 		spin_lock_irqsave(&bnad->bna_lock, flags);
 		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 7206ab2..3437613 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3203,7 +3203,7 @@
 	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
 	int mac_off  = 0;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	const unsigned char *addr;
 #endif
 
@@ -3354,7 +3354,7 @@
 	if (found & VPD_FOUND_MAC)
 		goto done;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
 	if (addr != NULL) {
 		memcpy(dev_addr, addr, 6);
@@ -5031,7 +5031,7 @@
 	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
 	  cassini_debug;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	cp->of_node = pci_device_to_OF_node(pdev);
 #endif
 
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index de69c54..bfab140 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3478,9 +3478,17 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+	if (unlikely(!icr))
 		return IRQ_NONE;  /* Not our interrupt */
 
+	/*
+	 * We might have caused the interrupt, but the read above
+	 * cleared it; if the driver is down there is nothing to do,
+	 * so return handled.
+	 */
+	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
+		return IRQ_HANDLED;
+
 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		/* guard against interrupt when we're going down */
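
The split return values matter because ICR is read-to-clear: once the read above has consumed the cause bits, answering IRQ_NONE would let the spurious-interrupt detector eventually disable a line the hardware really did assert. The general pattern, as a sketch with hypothetical helpers (read_icr() and device_is_down() stand in for the driver's real accessors):

static irqreturn_t my_isr(int irq, void *dev_id)
{
	u32 icr = read_icr(dev_id);	/* reading clears the cause bits */

	if (!icr)
		return IRQ_NONE;	/* genuinely not our interrupt */

	if (device_is_down(dev_id))
		return IRQ_HANDLED;	/* ours, but nothing left to do */

	/* ... normal handling ... */
	return IRQ_HANDLED;
}
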
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 1397da1..89a6903 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -1310,7 +1310,7 @@
 		 * apply workaround for hardware errata documented in errata
 		 * docs Fixes issue where some error prone or unreliable PCIe
 		 * completions are occurring, particularly with ASPM enabled.
-		 * Without fix, issue can cause tx timeouts.
+		 * Without fix, issue can cause Tx timeouts.
 		 */
 		reg = er32(GCR2);
 		reg |= 1;
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c913..28519ac 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2008 Intel Corporation.
+# Copyright(c) 1999 - 2011 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 7245dc2..1314998 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 5255be7..e610e136 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e45a61c..2fefa82 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index f8ed03d..fa08b63 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index e774380..bc0860a 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -102,7 +102,7 @@
 	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
 	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
 #define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
-	E1000_RADV     = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
+	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
 
 /* Convenience macros
  *
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 5bb65b7..fb46974 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index ff28721..68aa174 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -533,7 +533,7 @@
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -556,7 +556,7 @@
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -598,7 +598,7 @@
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -621,7 +621,7 @@
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -800,9 +800,9 @@
 	 * The possible values of the "fc" parameter are:
 	 *      0:  Flow control is completely disabled
 	 *      1:  Rx flow control is enabled (we can receive pause frames,
-	 *	  but not send pause frames).
+	 *          but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames but we
-	 *	  do not support receiving pause frames).
+	 *          do not support receiving pause frames).
 	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
 	 */
 	switch (hw->fc.current_mode) {
@@ -1031,9 +1031,9 @@
 	 * The possible values of the "fc" parameter are:
 	 *      0:  Flow control is completely disabled
 	 *      1:  Rx flow control is enabled (we can receive pause
-	 *	  frames but not send pause frames).
+	 *          frames but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames
-	 *	  frames but we do not receive pause frames).
+	 *          but we do not receive pause frames).
 	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
 	 *  other:  No other values should be possible at this point.
 	 */
@@ -1189,7 +1189,7 @@
 			} else {
 				hw->fc.current_mode = e1000_fc_rx_pause;
 				e_dbg("Flow Control = "
-					 "RX PAUSE frames only.\r\n");
+				      "Rx PAUSE frames only.\r\n");
 			}
 		}
 		/*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fa5b604..1c18f26 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -77,17 +77,17 @@
 	char *name;
 };
 
-#define E1000_RDFH	0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT	0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS	0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS	0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC	0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
+#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
 
-#define E1000_TDFH	0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT	0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS	0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS	0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC	0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
+#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
 
 static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 
@@ -99,7 +99,7 @@
 	/* Interrupt Registers */
 	{E1000_ICR, "ICR"},
 
-	/* RX Registers */
+	/* Rx Registers */
 	{E1000_RCTL, "RCTL"},
 	{E1000_RDLEN, "RDLEN"},
 	{E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@
 	{E1000_RDFTS, "RDFTS"},
 	{E1000_RDFPC, "RDFPC"},
 
-	/* TX Registers */
+	/* Tx Registers */
 	{E1000_TCTL, "TCTL"},
 	{E1000_TDBAL, "TDBAL"},
 	{E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@
 		break;
 	default:
 		printk(KERN_INFO "%-15s %08x\n",
-			reginfo->name, __er32(hw, reginfo->ofs));
+		       reginfo->name, __er32(hw, reginfo->ofs));
 		return;
 	}
 
@@ -171,9 +171,8 @@
 	printk(KERN_CONT "\n");
 }
 
-
 /*
- * e1000e_dump - Print registers, tx-ring and rx-ring
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
  */
 static void e1000e_dump(struct e1000_adapter *adapter)
 {
@@ -182,12 +181,20 @@
 	struct e1000_reg_info *reginfo;
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	struct e1000_tx_desc *tx_desc;
-	struct my_u0 { u64 a; u64 b; } *u0;
+	struct my_u0 {
+		u64 a;
+		u64 b;
+	} *u0;
 	struct e1000_buffer *buffer_info;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	union e1000_rx_desc_packet_split *rx_desc_ps;
 	struct e1000_rx_desc *rx_desc;
-	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+	struct my_u1 {
+		u64 a;
+		u64 b;
+		u64 c;
+		u64 d;
+	} *u1;
 	u32 staterr;
 	int i = 0;
 
@@ -198,12 +205,10 @@
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
 		printk(KERN_INFO "Device Name     state            "
-			"trans_start      last_rx\n");
+		       "trans_start      last_rx\n");
 		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-			netdev->name,
-			netdev->state,
-			netdev->trans_start,
-			netdev->last_rx);
+		       netdev->name, netdev->state, netdev->trans_start,
+		       netdev->last_rx);
 	}
 
 	/* Print Registers */
@@ -214,26 +219,26 @@
 		e1000_regdump(hw, reginfo);
 	}
 
-	/* Print TX Ring Summary */
+	/* Print Tx Ring Summary */
 	if (!netdev || !netif_running(netdev))
 		goto exit;
 
-	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
 	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-		" leng ntw timestamp\n");
+	       " leng ntw timestamp\n");
 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
-		0, tx_ring->next_to_use, tx_ring->next_to_clean,
-		(unsigned long long)buffer_info->dma,
-		buffer_info->length,
-		buffer_info->next_to_watch,
-		(unsigned long long)buffer_info->time_stamp);
+	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
+	       (unsigned long long)buffer_info->dma,
+	       buffer_info->length,
+	       buffer_info->next_to_watch,
+	       (unsigned long long)buffer_info->time_stamp);
 
-	/* Print TX Rings */
+	/* Print Tx Ring */
 	if (!netif_msg_tx_done(adapter))
 		goto rx_ring_summary;
 
-	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
 
 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 	 *
@@ -263,22 +268,22 @@
 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 	 */
 	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Legacy format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Legacy format\n");
 	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Ext Context format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Context format\n");
 	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Ext Data format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Data format\n");
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		tx_desc = E1000_TX_DESC(*tx_ring, i);
 		buffer_info = &tx_ring->buffer_info[i];
 		u0 = (struct my_u0 *)tx_desc;
 		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
-			"%04X  %3X %016llX %p",
-		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
-			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+		       "%04X  %3X %016llX %p",
+		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
 		       (unsigned long long)le64_to_cpu(u0->a),
 		       (unsigned long long)le64_to_cpu(u0->b),
 		       (unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@
 
 		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
-					buffer_info->length, true);
+				       16, 1, phys_to_virt(buffer_info->dma),
+				       buffer_info->length, true);
 	}
 
-	/* Print RX Rings Summary */
+	/* Print Rx Ring Summary */
 rx_ring_summary:
-	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
 	printk(KERN_INFO "Queue [NTU] [NTC]\n");
 	printk(KERN_INFO " %5d %5X %5X\n", 0,
-		rx_ring->next_to_use, rx_ring->next_to_clean);
+	       rx_ring->next_to_use, rx_ring->next_to_clean);
 
-	/* Print RX Rings */
+	/* Print Rx Ring */
 	if (!netif_msg_rx_status(adapter))
 		goto exit;
 
-	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
 	switch (adapter->rx_ps_pages) {
 	case 1:
 	case 2:
@@ -329,7 +334,7 @@
 		 *    +-----------------------------------------------------+
 		 */
 		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
-			"[buffer 1 63:0 ] "
+		       "[buffer 1 63:0 ] "
 		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
 		       "[bi->skb] <-- Ext Pkt Split format\n");
 		/* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@
 		 *   63       48 47    32 31            20 19               0
 		 */
 		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
-			"[vl   l0 ee  es] "
+		       "[vl   l0 ee  es] "
 		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
 		       "[bi->skb] <-- Ext Rx Write-Back format\n");
 		for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@
 			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 			u1 = (struct my_u1 *)rx_desc_ps;
 			staterr =
-				le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
 			if (staterr & E1000_RXD_STAT_DD) {
 				/* Descriptor Done */
 				printk(KERN_INFO "RWB[0x%03X]     %016llX "
-					"%016llX %016llX %016llX "
-					"---------------- %p", i,
-					(unsigned long long)le64_to_cpu(u1->a),
-					(unsigned long long)le64_to_cpu(u1->b),
-					(unsigned long long)le64_to_cpu(u1->c),
-					(unsigned long long)le64_to_cpu(u1->d),
-					buffer_info->skb);
+				       "%016llX %016llX %016llX "
+				       "---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       buffer_info->skb);
 			} else {
 				printk(KERN_INFO "R  [0x%03X]     %016llX "
-					"%016llX %016llX %016llX %016llX %p", i,
-					(unsigned long long)le64_to_cpu(u1->a),
-					(unsigned long long)le64_to_cpu(u1->b),
-					(unsigned long long)le64_to_cpu(u1->c),
-					(unsigned long long)le64_to_cpu(u1->d),
-					(unsigned long long)buffer_info->dma,
-					buffer_info->skb);
+				       "%016llX %016llX %016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
 
 				if (netif_msg_pktdata(adapter))
 					print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@
 		 * 63       48 47    40 39      32 31         16 15      0
 		 */
 		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
-			"[vl er S cks ln] [bi->dma       ] [bi->skb] "
-			"<-- Legacy format\n");
+		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
+		       "<-- Legacy format\n");
 		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 			rx_desc = E1000_RX_DESC(*rx_ring, i);
 			buffer_info = &rx_ring->buffer_info[i];
 			u0 = (struct my_u0 *)rx_desc;
 			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
-				"%016llX %p", i,
-				(unsigned long long)le64_to_cpu(u0->a),
-				(unsigned long long)le64_to_cpu(u0->b),
-				(unsigned long long)buffer_info->dma,
-				buffer_info->skb);
+			       "%016llX %p", i,
+			       (unsigned long long)le64_to_cpu(u0->a),
+			       (unsigned long long)le64_to_cpu(u0->b),
+			       (unsigned long long)buffer_info->dma,
+			       buffer_info->skb);
 			if (i == rx_ring->next_to_use)
 				printk(KERN_CONT " NTU\n");
 			else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@
 
 			if (netif_msg_pktdata(adapter))
 				print_hex_dump(KERN_INFO, "",
-					DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
-					adapter->rx_buffer_len, true);
+					       DUMP_PREFIX_ADDRESS,
+					       16, 1,
+					       phys_to_virt(buffer_info->dma),
+					       adapter->rx_buffer_len, true);
 		}
 	}
 
@@ -450,8 +456,7 @@
  * @skb: pointer to sk_buff to be indicated to stack
  **/
 static void e1000_receive_skb(struct e1000_adapter *adapter,
-			      struct net_device *netdev,
-			      struct sk_buff *skb,
+			      struct net_device *netdev, struct sk_buff *skb,
 			      u8 status, __le16 vlan)
 {
 	skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@
 }
 
 /**
- * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * e1000_rx_checksum - Receive Checksum Offload
  * @adapter:     board private structure
  * @status_err:  receive descriptor status and error fields
  * @csum:	receive descriptor csum field
@@ -548,7 +553,7 @@
 						  adapter->rx_buffer_len,
 						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-			dev_err(&pdev->dev, "RX DMA map failed\n");
+			dev_err(&pdev->dev, "Rx DMA map failed\n");
 			adapter->rx_dma_failed++;
 			break;
 		}
@@ -601,7 +606,8 @@
 			ps_page = &buffer_info->ps_pages[j];
 			if (j >= adapter->rx_ps_pages) {
 				/* all unused desc entries get hw null ptr */
-				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
+				rx_desc->read.buffer_addr[j + 1] =
+				    ~cpu_to_le64(0);
 				continue;
 			}
 			if (!ps_page->page) {
@@ -617,7 +623,7 @@
 				if (dma_mapping_error(&pdev->dev,
 						      ps_page->dma)) {
 					dev_err(&adapter->pdev->dev,
-					  "RX DMA page map failed\n");
+						"Rx DMA page map failed\n");
 					adapter->rx_dma_failed++;
 					goto no_buffers;
 				}
@@ -627,8 +633,8 @@
 			 * didn't change because each write-back
 			 * erases this info.
 			 */
-			rx_desc->read.buffer_addr[j+1] =
-			     cpu_to_le64(ps_page->dma);
+			rx_desc->read.buffer_addr[j + 1] =
+			    cpu_to_le64(ps_page->dma);
 		}
 
 		skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@
 						  adapter->rx_ps_bsize0,
 						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-			dev_err(&pdev->dev, "RX DMA map failed\n");
+			dev_err(&pdev->dev, "Rx DMA map failed\n");
 			adapter->rx_dma_failed++;
 			/* cleanup skb */
 			dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@
 			 * such as IA-64).
 			 */
 			wmb();
-			writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
 		}
 
 		i++;
@@ -1106,11 +1112,10 @@
 		cleaned = 1;
 		cleaned_count++;
 		dma_unmap_single(&pdev->dev, buffer_info->dma,
-				 adapter->rx_ps_bsize0,
-				 DMA_FROM_DEVICE);
+				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
-		/* see !EOP comment in other rx routine */
+		/* see !EOP comment in other Rx routine */
 		if (!(staterr & E1000_RXD_STAT_EOP))
 			adapter->flags2 |= FLAG2_IS_DISCARDING;
 
@@ -2610,7 +2615,7 @@
 }
 
 /**
- * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * e1000_configure_tx - Configure Transmit Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Tx unit of the MAC after a reset.
@@ -2663,7 +2668,7 @@
 		 * hthresh = 1 ==> prefetch when one or more available
 		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
 		 * BEWARE: this seems to work but should be considered first if
-		 * there are tx hangs or other tx related bugs
+		 * there are Tx hangs or other Tx related bugs
 		 */
 		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
 		ew32(TXDCTL(0), txdctl);
@@ -2877,7 +2882,7 @@
 	if (adapter->rx_ps_pages) {
 		/* this is a 32 byte descriptor */
 		rdlen = rx_ring->count *
-			sizeof(union e1000_rx_desc_packet_split);
+		    sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2900,7 +2905,7 @@
 		/*
 		 * set the writeback threshold (only takes effect if the RDTR
 		 * is set). set GRAN=1 and write back up to 0x4 worth, and
-		 * enable prefetching of 0x20 rx descriptors
+		 * enable prefetching of 0x20 Rx descriptors
 		 * granularity = 01
 		 * wthresh = 04,
 		 * hthresh = 04,
@@ -2981,12 +2986,10 @@
 			 * excessive C-state transition latencies result in
 			 * dropped transactions.
 			 */
-			pm_qos_update_request(
-				&adapter->netdev->pm_qos_req, 55);
+			pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
 		} else {
-			pm_qos_update_request(
-				&adapter->netdev->pm_qos_req,
-				PM_QOS_DEFAULT_VALUE);
+			pm_qos_update_request(&adapter->netdev->pm_qos_req,
+					      PM_QOS_DEFAULT_VALUE);
 		}
 	}
 
@@ -3152,7 +3155,7 @@
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
 		pba &= 0xffff;
 		/*
-		 * the Tx fifo also stores 16 bytes of information about the tx
+		 * the Tx fifo also stores 16 bytes of information about the Tx
 		 * but don't include ethernet FCS because hardware appends it
 		 */
 		min_tx_space = (adapter->max_frame_size +
@@ -3175,7 +3178,7 @@
 			pba -= min_tx_space - tx_space;
 
 			/*
-			 * if short on Rx space, Rx wins and must trump tx
+			 * if short on Rx space, Rx wins and must trump Tx
 			 * adjustment or use Early Receive if available
 			 */
 			if ((pba < min_rx_space) &&
@@ -4039,11 +4042,11 @@
 	       adapter->netdev->name,
 	       adapter->link_speed,
 	       (adapter->link_duplex == FULL_DUPLEX) ?
-	                        "Full Duplex" : "Half Duplex",
+	       "Full Duplex" : "Half Duplex",
 	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-	                        "RX/TX" :
-	       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
-	       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+	       "Rx/Tx" :
+	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
 }
 
 static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4338,7 +4341,7 @@
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* flush partial descriptors to memory before detecting tx hang */
+	/* flush partial descriptors to memory before detecting Tx hang */
 	if (adapter->flags2 & FLAG2_DMA_BURST) {
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
 		ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
@@ -4529,7 +4532,7 @@
 		buffer_info->next_to_watch = i;
 		buffer_info->dma = dma_map_single(&pdev->dev,
 						  skb->data + offset,
-						  size,	DMA_TO_DEVICE);
+						  size, DMA_TO_DEVICE);
 		buffer_info->mapped_as_page = false;
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
@@ -4576,7 +4579,7 @@
 		}
 	}
 
-	segs = skb_shinfo(skb)->gso_segs ?: 1;
+	segs = skb_shinfo(skb)->gso_segs ? : 1;
 	/* multiply data chunks by size of headers */
 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
 
@@ -4588,13 +4591,13 @@
 	return count;
 
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	dev_err(&pdev->dev, "Tx DMA map failed\n");
 	buffer_info->dma = 0;
 	if (count)
 		count--;
 
 	while (count--) {
-		if (i==0)
+		if (i == 0)
 			i += tx_ring->count;
 		i--;
 		buffer_info = &tx_ring->buffer_info[i];
@@ -6193,7 +6196,7 @@
 	int ret;
 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 		e1000e_driver_version);
-	pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
+	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
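
The bytecount computation in the netdev.c hunk above (segs = gso_segs ?: 1, then ((segs - 1) * skb_headlen(skb)) + skb->len) credits one copy of the headers per extra on-wire segment of a TSO packet. A stand-alone sketch of that arithmetic, using illustrative numbers rather than a live skb:

#include <stdio.h>

/*
 * Stand-alone restatement of the e1000e TSO byte accounting above:
 * each of the 'segs' on-wire segments carries its own header copy,
 * so (segs - 1) extra header lengths are added to the skb length.
 * 'gso_segs ?: 1' in the driver is the GNU elvis operator: use
 * gso_segs unless it is zero, in which case use 1.
 */
static unsigned long tso_bytecount(unsigned int gso_segs,
				   unsigned int header_len,
				   unsigned int skb_len)
{
	unsigned int segs = gso_segs ? gso_segs : 1;

	return (unsigned long)(segs - 1) * header_len + skb_len;
}

int main(void)
{
	/* e.g. 2 segments, 66-byte headers, 3066-byte skb -> 3132 bytes */
	printf("%lu\n", tso_bytecount(2, 66, 3066));
	return 0;
}
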
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a9612b0..4dd9b63 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@
 	module_param_array_named(X, X, int, &num_##X, 0);	\
 	MODULE_PARM_DESC(X, desc);
 
-
 /*
  * Transmit Interrupt Delay in units of 1.024 microseconds
- * Tx interrupt delay needs to typically be set to something non zero
+ * Tx interrupt delay typically needs to be set to something non-zero
  *
  * Valid Range: 0-65535
  */
@@ -112,6 +111,7 @@
 #define DEFAULT_ITR 3
 #define MAX_ITR 100000
 #define MIN_ITR 100
+
 /* IntMode (Interrupt Mode)
  *
  * Valid Range: 0 - 2
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 00f89e8..6bea051 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -640,7 +640,7 @@
 	s32 ret_val;
 	u16 phy_data;
 
-	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
 	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
 	if (ret_val)
 		goto out;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6de4675..119aa20 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -434,7 +434,6 @@
 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct netdev_queue *txq;
 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 	unsigned long tx_packets = 0, tx_bytes = 0;
 	int i = 0;
@@ -450,9 +449,8 @@
 	dev->stats.rx_dropped = rx_dropped;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		tx_bytes += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
+		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+		tx_packets += priv->tx_queue[i]->stats.tx_packets;
 	}
 
 	dev->stats.tx_bytes = tx_bytes;
@@ -2109,8 +2107,8 @@
 	}
 
 	/* Update transmit stats */
-	txq->tx_bytes += skb->len;
-	txq->tx_packets ++;
+	tx_queue->stats.tx_bytes += skb->len;
+	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
 	lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb..54de413 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@
 	MQ_MG_MODE
 };
 
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+};
+
 /**
  *	struct gfar_priv_tx_q - per tx queue structure
  *	@txlock: per queue tx spin lock
  *	@tx_skbuff:skb pointers
  *	@skb_curtx: to be used skb pointer
  *	@skb_dirtytx:the last used skb pointer
+ *	@stats: bytes/packets stats
  *	@qindex: index of this queue
  *	@dev: back pointer to the dev structure
  *	@grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@
 	struct	txbd8 *tx_bd_base;
 	struct	txbd8 *cur_tx;
 	struct	txbd8 *dirty_tx;
+	struct tx_q_stats stats;
 	struct	net_device *dev;
 	struct gfar_priv_grp *grp;
 	u16	skb_curtx;
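
The gianfar hunks above move the Tx byte/packet counters out of struct netdev_queue and into the driver-private struct tx_q_stats, summed in gfar_get_stats(). A minimal sketch of that pattern (all names here hypothetical): each queue's counters are written only from that queue's xmit path, so no lock is taken, and the reader tolerates torn reads of unsigned long just as the driver does.

/*
 * Hypothetical sketch of the per-queue stats layout adopted above.
 * Writers: only the owning Tx queue increments its own counters.
 * Reader: sums across queues without locking.
 */
struct txq_stats {
	unsigned long tx_packets;
	unsigned long tx_bytes;
};

struct toy_priv {
	int num_tx_queues;
	struct txq_stats *q;		/* one entry per Tx queue */
};

static void toy_get_stats(const struct toy_priv *p,
			  unsigned long *packets, unsigned long *bytes)
{
	int i;

	*packets = 0;
	*bytes = 0;
	for (i = 0; i < p->num_tx_queues; i++) {
		*packets += p->q[i].tx_packets;
		*bytes += p->q[i].tx_bytes;
	}
}
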
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960..fdb0333 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@
 		dev_dbg(&dev->dev, " starting queue\n");
 	netif_start_queue(dev);
 
+	GRETH_REGSAVE(greth->regs->status, 0xFF);
+
 	napi_enable(&greth->napi);
 
 	greth_enable_irqs(greth);
@@ -371,7 +373,9 @@
 
 	napi_disable(&greth->napi);
 
+	greth_disable_irqs(greth);
 	greth_disable_tx(greth);
+	greth_disable_rx(greth);
 
 	netif_stop_queue(dev);
 
@@ -388,12 +392,20 @@
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
 	int err = NETDEV_TX_OK;
-	u32 status, dma_addr;
+	u32 status, dma_addr, ctrl;
+	unsigned long flags;
 
-	bdp = greth->tx_bd_base + greth->tx_next;
+	/* Clean TX Ring */
+	greth_clean_tx(greth->netdev);
 
 	if (unlikely(greth->tx_free <= 0)) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -406,13 +418,14 @@
 		goto out;
 	}
 
+	bdp = greth->tx_bd_base + greth->tx_next;
 	dma_addr = greth_read_bd(&bdp->addr);
 
 	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 
 	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
-	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
 
 	/* Wrap around descriptor ring */
 	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@
 	greth->tx_next = NEXT_TX(greth->tx_next);
 	greth->tx_free--;
 
-	/* No more descriptors */
-	if (unlikely(greth->tx_free == 0)) {
-
-		/* Free transmitted descriptors */
-		greth_clean_tx(dev);
-
-		/* If nothing was cleaned, stop queue & wait for irq */
-		if (unlikely(greth->tx_free == 0)) {
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
-	}
-
 	/* Write descriptor control word and enable transmission */
 	greth_write_bd(&bdp->stat, status);
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 out:
 	dev_kfree_skb(skb);
@@ -450,13 +452,23 @@
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr;
+	u32 status = 0, dma_addr, ctrl;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+	unsigned long flags;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* Clean TX Ring */
+	greth_clean_tx_gbit(dev);
+
 	if (greth->tx_free < nr_frags + 1) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -499,7 +511,7 @@
 		greth->tx_skbuff[curr_tx] = NULL;
 		bdp = greth->tx_bd_base + curr_tx;
 
-		status = GRETH_TXBD_CSALL;
+		status = GRETH_TXBD_CSALL | GRETH_BD_EN;
 		status |= frag->size & GRETH_BD_LEN;
 
 		/* Wrap around descriptor ring */
@@ -509,14 +521,8 @@
 		/* More fragments left */
 		if (i < nr_frags - 1)
 			status |= GRETH_TXBD_MORE;
-
-		/* ... last fragment, check if out of descriptors  */
-		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
-			/* Enable interrupts and stop queue */
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
+		else
+			status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 
 		greth_write_bd(&bdp->stat, status);
 
@@ -536,26 +542,29 @@
 
 	wmb();
 
-	/* Enable the descriptors that we configured ...  */
-	for (i = 0; i < nr_frags + 1; i++) {
-		bdp = greth->tx_bd_base + greth->tx_next;
-		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-		greth->tx_next = NEXT_TX(greth->tx_next);
-		greth->tx_free--;
-	}
+	/* Enable the descriptor chain by enabling the first descriptor */
+	bdp = greth->tx_bd_base + greth->tx_next;
+	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+	greth->tx_next = curr_tx;
+	greth->tx_free -= nr_frags + 1;
 
+	wmb();
+
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
 
 frag_map_error:
-	/* Unmap SKB mappings that succeeded */
+	/* Unmap SKB mappings that succeeded and disable descriptor */
 	for (i = 0; greth->tx_next + i != curr_tx; i++) {
 		bdp = greth->tx_bd_base + greth->tx_next + i;
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
 				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
 				 DMA_TO_DEVICE);
+		greth_write_bd(&bdp->stat, 0);
 	}
 map_error:
 	if (net_ratelimit())
@@ -565,12 +574,11 @@
 	return err;
 }
 
-
 static irqreturn_t greth_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct greth_private *greth;
-	u32 status;
+	u32 status, ctrl;
 	irqreturn_t retval = IRQ_NONE;
 
 	greth = netdev_priv(dev);
@@ -580,13 +588,15 @@
 	/* Get the interrupt events that caused us to be here. */
 	status = GRETH_REGLOAD(greth->regs->status);
 
+	/* We must also check whether interrupts are enabled: the
+	 * INT_TX|INT_RX flags may be set regardless of whether the IRQ
+	 * is enabled. This is especially important with a shared IRQ.
+	 */
+	ctrl = GRETH_REGLOAD(greth->regs->control);
+
 	/* Handle rx and tx interrupts through poll */
-	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
-		/* Clear interrupt status */
-		GRETH_REGORIN(greth->regs->status,
-			      status & (GRETH_INT_RX | GRETH_INT_TX));
-
+	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 		retval = IRQ_HANDLED;
 
 		/* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@
 
 	while (1) {
 		bdp = greth->tx_bd_base + greth->tx_last;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
 		stat = greth_read_bd(&bdp->stat);
 
 		if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@
 
 		/* We only clean fully completed SKBs */
 		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
-		stat = bdp_last_frag->stat;
+
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
+		stat = greth_read_bd(&bdp_last_frag->stat);
 
 		if (stat & GRETH_BD_EN)
 			break;
@@ -702,21 +717,9 @@
 		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
-	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
-		netif_wake_queue(dev);
-	}
-}
 
-static int greth_pending_packets(struct greth_private *greth)
-{
-	struct greth_bd *bdp;
-	u32 status;
-	bdp = greth->rx_bd_base + greth->rx_cur;
-	status = greth_read_bd(&bdp->stat);
-	if (status & GRETH_BD_EN)
-		return 0;
-	else
-		return 1;
+	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+		netif_wake_queue(dev);
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@
 	int pkt_len;
 	int bad, count;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
 	for (count = 0; count < limit; ++count) {
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
-		dma_addr = greth_read_bd(&bdp->addr);
-		bad = 0;
 
 		if (unlikely(status & GRETH_BD_EN)) {
 			break;
 		}
 
+		dma_addr = greth_read_bd(&bdp->addr);
+		bad = 0;
+
 		/* Check status for errors. */
 		if (unlikely(status & GRETH_RXBD_STATUS)) {
 			if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@
 
 		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 
+		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
@@ -836,6 +845,7 @@
 	int pkt_len;
 	int bad, count = 0;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
@@ -843,6 +853,8 @@
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
 		skb = greth->rx_skbuff[greth->rx_cur];
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
 		bad = 0;
 
@@ -865,10 +877,9 @@
 			}
 		}
 
-		/* Allocate new skb to replace current */
-		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
-		if (!bad && newskb) {
+		/* Allocate a new skb to replace the current one; not
+		 * needed if the current skb can be reused */
+		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
 			skb_reserve(newskb, NET_IP_ALIGN);
 
 			dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@
 				if (net_ratelimit())
 					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
 				dev_kfree_skb(newskb);
+				/* reusing current skb, so it is a drop */
 				dev->stats.rx_dropped++;
 			}
+		} else if (bad) {
+			/* Bad Frame transfer, the skb is reused */
+			dev->stats.rx_dropped++;
 		} else {
+			/* Failed to allocate a new skb, so the current
+			 * "filled" skb is reused, as if the transfer had
+			 * failed. One could argue that Rx descriptor table
+			 * handling should be split into cleaning and
+			 * refilling, as in the Tx part of the driver.
+			 */
 			if (net_ratelimit())
 				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+			/* reusing current skb, so it is a drop */
 			dev->stats.rx_dropped++;
 		}
 
@@ -920,7 +942,9 @@
 
 		wmb();
 		greth_write_bd(&bdp->stat, status);
+		spin_lock_irqsave(&greth->devlock, flags);
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
 
@@ -932,15 +956,18 @@
 {
 	struct greth_private *greth;
 	int work_done = 0;
+	unsigned long flags;
+	u32 mask, ctrl;
 	greth = container_of(napi, struct greth_private, napi);
 
-	if (greth->gbit_mac) {
-		greth_clean_tx_gbit(greth->netdev);
-	} else {
-		greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+	if (netif_queue_stopped(greth->netdev)) {
+		if (greth->gbit_mac)
+			greth_clean_tx_gbit(greth->netdev);
+		else
+			greth_clean_tx(greth->netdev);
 	}
 
-restart_poll:
 	if (greth->gbit_mac) {
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
@@ -949,15 +976,29 @@
 
 	if (work_done < budget) {
 
-		napi_complete(napi);
+		spin_lock_irqsave(&greth->devlock, flags);
 
-		if (greth_pending_packets(greth)) {
-			napi_reschedule(napi);
-			goto restart_poll;
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		if (netif_queue_stopped(greth->netdev)) {
+			GRETH_REGSAVE(greth->regs->control,
+					ctrl | GRETH_TXI | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE |
+			       GRETH_INT_TX | GRETH_INT_TE;
+		} else {
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE;
+		}
+
+		if (GRETH_REGLOAD(greth->regs->status) & mask) {
+			GRETH_REGSAVE(greth->regs->control, ctrl);
+			spin_unlock_irqrestore(&greth->devlock, flags);
+			goto restart_txrx_poll;
+		} else {
+			__napi_complete(napi);
+			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}
 
-	greth_enable_irqs(greth);
 	return work_done;
 }
 
@@ -1152,11 +1193,11 @@
 };
 
 static struct net_device_ops greth_netdev_ops = {
-	.ndo_open = greth_open,
-	.ndo_stop = greth_close,
-	.ndo_start_xmit = greth_start_xmit,
-	.ndo_set_mac_address = greth_set_mac_add,
-	.ndo_validate_addr 	= eth_validate_addr,
+	.ndo_open		= greth_open,
+	.ndo_stop		= greth_close,
+	.ndo_start_xmit		= greth_start_xmit,
+	.ndo_set_mac_address	= greth_set_mac_add,
+	.ndo_validate_addr	= eth_validate_addr,
 };
 
 static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@
 	struct greth_private *greth = netdev_priv(dev);
 	struct phy_device *phydev = greth->phy;
 	unsigned long flags;
-
 	int status_change = 0;
+	u32 ctrl;
 
 	spin_lock_irqsave(&greth->devlock, flags);
 
 	if (phydev->link) {
 
 		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
-			GRETH_REGANDIN(greth->regs->control,
-				       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+			ctrl = GRETH_REGLOAD(greth->regs->control) &
+			       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
 
 			if (phydev->duplex)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
+				ctrl |= GRETH_CTRL_FD;
 
-			if (phydev->speed == SPEED_100) {
-
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
-			}
-
+			if (phydev->speed == SPEED_100)
+				ctrl |= GRETH_CTRL_SP;
 			else if (phydev->speed == SPEED_1000)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+				ctrl |= GRETH_CTRL_GB;
 
+			GRETH_REGSAVE(greth->regs->control, ctrl);
 			greth->speed = phydev->speed;
 			greth->duplex = phydev->duplex;
 			status_change = 1;
@@ -1600,6 +1638,9 @@
 	{
 	 .name = "GAISLER_ETHMAC",
 	 },
+	{
+	 .name = "01_01d",
+	 },
 	{},
 };
 
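The reworked greth_poll() above follows the usual NAPI completion idiom: after processing less than the budget, re-enable interrupts under the device lock, then re-read the status register; if an event raced in, undo the enable and restart the poll instead of completing. A condensed sketch with hypothetical register stand-ins for GRETH_REGLOAD/GRETH_REGSAVE (locking elided for brevity):

#include <stdint.h>

/* Hypothetical MMIO stand-ins; not the real GRETH layout. */
struct toy_nic {
	volatile uint32_t ctrl;		/* interrupt-enable bits */
	volatile uint32_t status;	/* latched event bits */
};

#define TOY_RXI		0x8		/* Rx interrupt enable */
#define TOY_INT_RX	0x4		/* Rx event pending */

/* Pretend Rx processing: one packet per call while events remain. */
static int toy_rx(struct toy_nic *n, int budget)
{
	return ((n->status & TOY_INT_RX) && budget > 0) ? 1 : 0;
}

static int toy_poll(struct toy_nic *n, int budget)
{
	int work_done = 0;
	uint32_t ctrl;

restart:
	work_done += toy_rx(n, budget - work_done);

	if (work_done < budget) {
		/* re-enable the IRQ, then re-check for a racing event */
		ctrl = n->ctrl;
		n->ctrl = ctrl | TOY_RXI;
		if (n->status & TOY_INT_RX) {
			n->ctrl = ctrl;		/* back out, poll again */
			goto restart;
		}
		/* __napi_complete(napi) would be called here */
	}
	return work_done;
}
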
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903..be0f206 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
 #define GRETH_BD_LEN 0x7FF
 
 #define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
 #define GRETH_INT_TX 0x8
 #define GRETH_TXI 0x4
 #define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
 #define GRETH_TXBD_ERR_UE 0x4000
 #define GRETH_TXBD_ERR_AL 0x8000
 
+#define GRETH_INT_RE         0x1
 #define GRETH_INT_RX         0x4
 #define GRETH_RXEN           0x2
 #define GRETH_RXI            0x8
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a060610..602078b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -6667,8 +6667,6 @@
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	struct net_device *netdev = tx_ring->netdev;
-	struct netdev_queue *txq;
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
@@ -6765,9 +6763,6 @@
 		/* add the ATR filter if ATR is on */
 		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
 			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
-		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
-		txq->tx_bytes += skb->len;
-		txq->tx_packets++;
 		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
 		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
@@ -6925,8 +6920,6 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int i;
 
-	/* accurate rx/tx bytes/packets stats */
-	dev_txq_stats_fold(netdev, stats);
 	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6943,6 +6936,22 @@
 			stats->rx_bytes   += bytes;
 		}
 	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes   += bytes;
+		}
+	}
 	rcu_read_unlock();
 	/* following stats updated by ixgbe_watchdog_task() */
 	stats->multicast	= netdev->stats.multicast;
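
The ixgbe hunk above replaces dev_txq_stats_fold() with per-ring 64-bit counters read via u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh(), so 32-bit hosts never observe a torn 64-bit value. A userspace sketch, with a hand-rolled sequence counter standing in for struct u64_stats_sync:

#include <stdint.h>

/* Stand-in for struct u64_stats_sync: the writer bumps 'seq' to an
 * odd value before updating the counters and back to even after. */
struct toy_ring_stats {
	volatile unsigned int seq;
	uint64_t packets;
	uint64_t bytes;
};

/* Reader-side retry loop mirroring u64_stats_fetch_begin/retry above:
 * spin until a stable snapshot (even, unchanged sequence) is taken. */
static void toy_fetch(const struct toy_ring_stats *s,
		      uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*packets = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq);
}
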
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845af..5933621 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@
 	rcu_read_lock_bh();
 	vlan = rcu_dereference(q->vlan);
 	if (vlan)
-		netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+		vlan->dev->stats.tx_dropped++;
 	rcu_read_unlock_bh();
 
 	return err;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bb8645a..bde7d61 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -554,6 +554,8 @@
 	struct mii_if_info mii;
 	struct rtl8169_counters counters;
 	u32 saved_wolopts;
+
+	const struct firmware *fw;
 };
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -1766,6 +1768,29 @@
 	}
 }
 
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+	release_firmware(tp->fw);
+	tp->fw = NULL;
+}
+
+static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+{
+	const struct firmware **fw = &tp->fw;
+	int rc = !*fw;
+
+	if (rc) {
+		rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
+		if (rc < 0)
+			goto out;
+	}
+
+	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
+	rtl_phy_write_fw(tp, *fw);
+out:
+	return rc;
+}
+
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
 {
 	static const struct phy_reg phy_reg_init[] = {
@@ -2139,7 +2164,6 @@
 		{ 0x0d, 0xf880 }
 	};
 	void __iomem *ioaddr = tp->mmio_addr;
-	const struct firmware *fw;
 
 	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
@@ -2203,11 +2227,8 @@
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if (rtl_readphy(tp, 0x06) == 0xbf00 &&
-	    request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) {
-		rtl_phy_write_fw(tp, fw);
-		release_firmware(fw);
-	} else {
+	if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
+	    (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
 		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
 	}
 
@@ -2257,7 +2278,6 @@
 		{ 0x0d, 0xf880 }
 	};
 	void __iomem *ioaddr = tp->mmio_addr;
-	const struct firmware *fw;
 
 	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
@@ -2312,11 +2332,8 @@
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if (rtl_readphy(tp, 0x06) == 0xb300 &&
-	    request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) {
-		rtl_phy_write_fw(tp, fw);
-		release_firmware(fw);
-	} else {
+	if ((rtl_readphy(tp, 0x06) != 0xb300) ||
+	    (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
 		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
 	}
 
@@ -3200,6 +3217,8 @@
 
 	cancel_delayed_work_sync(&tp->task);
 
+	rtl_release_firmware(tp);
+
 	unregister_netdev(dev);
 
 	if (pci_dev_run_wake(pdev))
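
rtl_apply_firmware() above converts the PHY firmware handling to a request-once/cache scheme: the blob is fetched on first use, kept in tp->fw, and dropped once in the remove path via rtl_release_firmware(). A hedged kernel-style sketch of the pattern; write_fw_to_phy() is a hypothetical helper, while request_firmware()/release_firmware() are the real <linux/firmware.h> API:

#include <linux/firmware.h>
#include <linux/device.h>

struct fw_cache {
	const struct firmware *fw;	/* NULL until first successful load */
};

/* Hypothetical helper that would push the blob into the PHY. */
static void write_fw_to_phy(const struct firmware *fw) { (void)fw; }

static int apply_firmware(struct fw_cache *c, const char *name,
			  struct device *dev)
{
	int rc = 0;

	if (!c->fw)			/* first use: request and cache */
		rc = request_firmware(&c->fw, name, dev);
	if (rc == 0)
		write_fw_to_phy(c->fw);
	return rc;
}

static void release_cached_firmware(struct fw_cache *c)
{
	release_firmware(c->fw);	/* NULL-safe */
	c->fw = NULL;
}
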
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 711449c..002bac7 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1153,6 +1153,9 @@
 	int count;
 	int cpu;
 
+	if (rss_cpus)
+		return rss_cpus;
+
 	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
@@ -1266,27 +1269,18 @@
 	efx->legacy_irq = 0;
 }
 
-struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
-	unsigned tx_channel_offset =
-		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
-			    type >= EFX_TXQ_TYPES);
-	return &efx->channel[tx_channel_offset + index]->tx_queue[type];
-}
-
 static void efx_set_channels(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
-	unsigned tx_channel_offset =
+
+	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
 	/* Channel pointers were set in efx_init_struct() but we now
 	 * need to clear them for TX queues in any RX-only channels. */
 	efx_for_each_channel(channel, efx) {
-		if (channel->channel - tx_channel_offset >=
+		if (channel->channel - efx->tx_channel_offset >=
 		    efx->n_tx_channels) {
 			efx_for_each_channel_tx_queue(tx_queue, channel)
 				tx_queue->channel = NULL;
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index bdce66d..28df866 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -735,6 +735,7 @@
 	unsigned next_buffer_table;
 	unsigned n_channels;
 	unsigned n_rx_channels;
+	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
@@ -929,8 +930,13 @@
 	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
 		     (_efx)->channel[_channel->channel + 1] : NULL)
 
-extern struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+			    type >= EFX_TXQ_TYPES);
+	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
 
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5..7cb301d 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -142,14 +142,6 @@
 MODULE_AUTHOR("Tilera");
 MODULE_LICENSE("GPL");
 
-
-#define IS_MULTICAST(mac_addr) \
-	(((u8 *)(mac_addr))[0] & 0x01)
-
-#define IS_BROADCAST(mac_addr) \
-	(((u16 *)(mac_addr))[0] == 0xffff)
-
-
 /*
  * Queue of incoming packets for a specific cpu and device.
  *
@@ -795,7 +787,7 @@
 		/*
 		 * FIXME: Implement HW multicast filter.
 		 */
-		if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+		if (is_unicast_ether_addr(buf)) {
 			/* Filter packets not for our address. */
 			const u8 *mine = dev->dev_addr;
 			filter = compare_ether_addr(mine, buf);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 73a3e0d..715e7b4 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2032,7 +2032,7 @@
 			netdev_for_each_mc_addr(ha, dev) {
 				/* Only support group multicast for now.
 				 */
-				if (!(ha->addr[0] & 1))
+				if (!is_multicast_ether_addr(ha->addr))
 					continue;
 
 				/* Ask CPM to run CRC and set bit in
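
Both the tilepro and ucc_geth hunks above replace open-coded MAC-address bit tests with the helpers from <linux/etherdevice.h>. A userspace restatement of what those helpers check (the I/G bit is the least significant bit of the first octet, and broadcast addresses count as multicast):

#include <stdbool.h>
#include <stdint.h>

static bool toy_is_multicast_ether_addr(const uint8_t *addr)
{
	return addr[0] & 0x01;		/* group (I/G) bit set */
}

static bool toy_is_unicast_ether_addr(const uint8_t *addr)
{
	/* broadcast (ff:ff:...) has the group bit set, so this also
	 * excludes broadcast, matching the tilepro change above */
	return !toy_is_multicast_ether_addr(addr);
}
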
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 593c104..d776c4a 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1021,13 +1021,15 @@
 		    (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
 			pr_debug("invalid frame detected (ignored)"
 				"offset[%u]=%u, length=%u, skb=%p\n",
-							x, offset, temp, skb);
+							x, offset, temp, skb_in);
 			if (!x)
 				goto error;
 			break;
 
 		} else {
 			skb = skb_clone(skb_in, GFP_ATOMIC);
+			if (!skb)
+				goto error;
 			skb->len = temp;
 			skb->data = ((u8 *)skb_in->data) + offset;
 			skb_set_tail_pointer(skb, temp);
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 1ac9b56..c81a651 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4120,6 +4120,7 @@
 	       "hotplug event.\n");
 
 out:
+	release_firmware(fw);
 	return ret;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 01880aa..ea2e7d7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -954,6 +954,9 @@
 				&adc_dc_cal_multi_sample;
 		}
 		ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+		if (AR_SREV_9287(ah))
+			ah->supp_cals &= ~ADC_GAIN_CAL;
 	}
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 088f141..749a936 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -226,6 +226,10 @@
 	    eep->baseEepHeader.pwdclkind == 0)
 		ah->need_an_top2_fixup = 1;
 
+	if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+	    (AR_SREV_9280(ah)))
+		eep->modalHeader[0].xpaBiasLvl = 0;
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index a099b3e..1ce506f 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -433,6 +433,7 @@
 void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
 			enum htc_endpoint_id ep_id, bool txok);
 
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
 void ath9k_htc_station_work(struct work_struct *work);
 void ath9k_htc_aggr_work(struct work_struct *work);
 void ath9k_ani_work(struct work_struct *work);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 845b4c9..f4d576b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -301,6 +301,16 @@
 
 	priv->nstations++;
 
+	/*
+	 * Set chainmask etc. on the target.
+	 */
+	ret = ath9k_htc_update_cap_target(priv);
+	if (ret)
+		ath_dbg(common, ATH_DBG_CONFIG,
+			"Failed to update capability in target\n");
+
+	priv->ah->is_monitoring = true;
+
 	return 0;
 
 err_vif:
@@ -328,6 +338,7 @@
 	}
 
 	priv->nstations--;
+	priv->ah->is_monitoring = false;
 
 	return 0;
 }
@@ -419,7 +430,7 @@
 	return 0;
 }
 
-static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
 {
 	struct ath9k_htc_cap_target tcap;
 	int ret;
@@ -1186,6 +1197,20 @@
 		}
 	}
 
+	/*
+	 * Monitor interface should be added before
+	 * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+	 */
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (conf->flags & IEEE80211_CONF_MONITOR) {
+			if (ath9k_htc_add_monitor_interface(priv))
+				ath_err(common, "Failed to set monitor mode\n");
+			else
+				ath_dbg(common, ATH_DBG_CONFIG,
+					"HW opmode set to Monitor mode\n");
+		}
+	}
+
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 		struct ieee80211_channel *curchan = hw->conf.channel;
 		int pos = curchan->hw_value;
@@ -1221,16 +1246,6 @@
 		ath_update_txpow(priv);
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-		if (conf->flags & IEEE80211_CONF_MONITOR) {
-			if (ath9k_htc_add_monitor_interface(priv))
-				ath_err(common, "Failed to set monitor mode\n");
-			else
-				ath_dbg(common, ATH_DBG_CONFIG,
-					"HW opmode set to Monitor mode\n");
-		}
-	}
-
 	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
 		mutex_lock(&priv->htc_pm_lock);
 		if (!priv->ps_idle) {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fde9786..1afb8bb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -436,9 +436,10 @@
 
 static int ath9k_hw_post_init(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int ecode;
 
-	if (!AR_SREV_9271(ah)) {
+	if (common->bus_ops->ath_bus_type != ATH_USB) {
 		if (!ath9k_hw_chip_test(ah))
 			return -ENODEV;
 	}
@@ -1213,7 +1214,7 @@
 	ah->txchainmask = common->tx_chainmask;
 	ah->rxchainmask = common->rx_chainmask;
 
-	if (!ah->chip_fullsleep) {
+	if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
 		ath9k_hw_abortpcurecv(ah);
 		if (!ath9k_hw_stopdmarecv(ah)) {
 			ath_dbg(common, ATH_DBG_XMIT,
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a413..2176ede 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@
 	hw_priv->link = link;
 
 	/*
-	 * Make sure the IRQ handler cannot proceed until at least
-	 * dev->base_addr is initialized.
+	 * We enable the IRQ here, but the IRQ handler will not proceed
+	 * until dev->base_addr is set below. This protects us from
+	 * receive interrupts while the driver is not yet initialized.
 	 */
-	spin_lock_irqsave(&local->irq_init_lock, flags);
-
 	ret = pcmcia_request_irq(link, prism2_interrupt);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
 	ret = pcmcia_enable_device(link);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
+	spin_lock_irqsave(&local->irq_init_lock, flags);
 	dev->irq = link->irq;
 	dev->base_addr = link->resource[0]->start;
-
 	spin_unlock_irqrestore(&local->irq_init_lock, flags);
 
 	local->shutdown = 0;
@@ -546,8 +545,6 @@
 
 	return ret;
 
- failed_unlock:
-	spin_unlock_irqrestore(&local->irq_init_lock, flags);
  failed:
 	kfree(hw_priv);
 	prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f..ae438ed 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1973,6 +1973,13 @@
 
 	inta = ipw_read32(priv, IPW_INTA_RW);
 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+		/* Only handle the cached INTA values */
+		inta = 0;
+	}
 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
 
 	/* Add any cached INTA values that need to be handled */
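
The inta == 0xFFFFFFFF test above is the conventional surprise-removal check for PCI devices: reads from a device that has dropped off the bus float to all-ones, so such a value must not be decoded as interrupt bits. A minimal sketch of the guard:

#include <stdbool.h>
#include <stdint.h>

/* Reads of all-ones from MMIO usually mean the PCI device is gone
 * (or the bus is wedged); treat the register value as invalid. */
static bool irq_status_plausible(uint32_t inta)
{
	return inta != 0xFFFFFFFFu;
}
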
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a..f618b96 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -618,7 +618,7 @@
 	else
 		*burst_possible = false;
 
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 		*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
 
 	if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index 401c44b..bae6472 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -69,7 +69,7 @@
 	struct mutex read_mutex; /* Serialize read_irq access */
 	struct mutex mutex; /* Serialize info struct access */
 	u8 *buf;
-	unsigned int buflen;
+	size_t buflen;
 };
 
 static const char reg_vdd_io[]	= "Vdd_IO";
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b65e65a..e567302 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -990,30 +990,51 @@
 
 static void set_media_not_present(struct scsi_disk *sdkp)
 {
-	sdkp->media_present = 0;
-	sdkp->capacity = 0;
-	sdkp->device->changed = 1;
+	if (sdkp->media_present)
+		sdkp->device->changed = 1;
+
+	if (sdkp->device->removable) {
+		sdkp->media_present = 0;
+		sdkp->capacity = 0;
+	}
+}
+
+static int media_not_present(struct scsi_disk *sdkp,
+			     struct scsi_sense_hdr *sshdr)
+{
+	if (!scsi_sense_valid(sshdr))
+		return 0;
+
+	/* not invoked for commands that could return deferred errors */
+	switch (sshdr->sense_key) {
+	case UNIT_ATTENTION:
+	case NOT_READY:
+		/* medium not present */
+		if (sshdr->asc == 0x3A) {
+			set_media_not_present(sdkp);
+			return 1;
+		}
+	}
+	return 0;
 }
 
 /**
- *	sd_media_changed - check if our medium changed
- *	@disk: kernel device descriptor 
+ *	sd_check_events - check media events
+ *	@disk: kernel device descriptor
+ *	@clearing: disk events currently being cleared
  *
- *	Returns 0 if not applicable or no change; 1 if change
+ *	Returns mask of DISK_EVENT_*.
  *
  *	Note: this function is invoked from the block subsystem.
  **/
-static int sd_media_changed(struct gendisk *disk)
+static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_sense_hdr *sshdr = NULL;
 	int retval;
 
-	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
-
-	if (!sdp->removable)
-		return 0;
+	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
 
 	/*
 	 * If the device is offline, don't send any commands - just pretend as
@@ -1043,40 +1064,32 @@
 					      sshdr);
 	}
 
-	if (retval) {
+	/* failed to execute TUR, assume media not present */
+	if (host_byte(retval)) {
 		set_media_not_present(sdkp);
 		goto out;
 	}
 
+	if (media_not_present(sdkp, sshdr))
+		goto out;
+
 	/*
 	 * For removable scsi disk we have to recognise the presence
-	 * of a disk in the drive. This is kept in the struct scsi_disk
-	 * struct and tested at open !  Daniel Roche (dan@lectra.fr)
+	 * of a disk in the drive.
 	 */
+	if (!sdkp->media_present)
+		sdp->changed = 1;
 	sdkp->media_present = 1;
-
 out:
 	/*
-	 * Report a media change under the following conditions:
+	 * sdp->changed is set under the following conditions:
 	 *
-	 *	Medium is present now and wasn't present before.
-	 *	Medium wasn't present before and is present now.
-	 *	Medium was present at all times, but it changed while
-	 *		we weren't looking (sdp->changed is set).
-	 *
-	 * If there was no medium before and there is no medium now then
-	 * don't report a change, even if a medium was inserted and removed
-	 * while we weren't looking.
+	 *	Medium present state has changed in either direction.
+	 *	Device has indicated UNIT_ATTENTION.
 	 */
-	retval = (sdkp->media_present != sdkp->previous_state ||
-			(sdkp->media_present && sdp->changed));
-	if (retval)
-		sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
-	sdkp->previous_state = sdkp->media_present;
-
-	/* sdp->changed indicates medium was changed or is not present */
-	sdp->changed = !sdkp->media_present;
 	kfree(sshdr);
+	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+	sdp->changed = 0;
 	return retval;
 }
 
@@ -1169,7 +1182,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= sd_compat_ioctl,
 #endif
-	.media_changed		= sd_media_changed,
+	.check_events		= sd_check_events,
 	.revalidate_disk	= sd_revalidate_disk,
 	.unlock_native_capacity	= sd_unlock_native_capacity,
 };
@@ -1312,23 +1325,6 @@
 	return good_bytes;
 }
 
-static int media_not_present(struct scsi_disk *sdkp,
-			     struct scsi_sense_hdr *sshdr)
-{
-
-	if (!scsi_sense_valid(sshdr))
-		return 0;
-	/* not invoked for commands that could return deferred errors */
-	if (sshdr->sense_key != NOT_READY &&
-	    sshdr->sense_key != UNIT_ATTENTION)
-		return 0;
-	if (sshdr->asc != 0x3A) /* medium not present */
-		return 0;
-
-	set_media_not_present(sdkp);
-	return 1;
-}
-
 /*
  * spinup disk - called only in sd_revalidate_disk()
  */
@@ -1503,7 +1499,7 @@
 	 */
 	if (sdp->removable &&
 	    sense_valid && sshdr->sense_key == NOT_READY)
-		sdp->changed = 1;
+		set_media_not_present(sdkp);
 
 	/*
 	 * We used to set media_present to 0 here to indicate no media
@@ -2389,8 +2385,10 @@
 
 	gd->driverfs_dev = &sdp->sdev_gendev;
 	gd->flags = GENHD_FL_EXT_DEVT;
-	if (sdp->removable)
+	if (sdp->removable) {
 		gd->flags |= GENHD_FL_REMOVABLE;
+		gd->events |= DISK_EVENT_MEDIA_CHANGE;
+	}
 
 	add_disk(gd);
 	sd_dif_config_host(sdkp);
@@ -2472,7 +2470,6 @@
 	sdkp->disk = gd;
 	sdkp->index = index;
 	atomic_set(&sdkp->openers, 0);
-	sdkp->previous_state = 1;
 
 	if (!sdp->request_queue->rq_timeout) {
 		if (sdp->type != TYPE_MOD)
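
sd.c above is converted from the old ->media_changed() callback (a 0/1 answer, with edge detection split between driver and block core) to ->check_events(), which returns a mask of DISK_EVENT_* bits. A hypothetical minimal implementation of the new contract, assuming a toy driver whose toy_probe_media() reports current media presence:

#include <linux/genhd.h>

struct toy_disk {
	bool media_present;
	bool changed;
};

/* Hypothetical hardware probe: returns current media presence. */
static bool toy_probe_media(struct toy_disk *td);

/* Latch 'changed' on any presence transition and report it exactly
 * once as DISK_EVENT_MEDIA_CHANGE, as the sd.c conversion does. */
static unsigned int toy_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct toy_disk *td = disk->private_data;
	unsigned int events = 0;
	bool present = toy_probe_media(td);

	if (present != td->media_present) {
		td->media_present = present;
		td->changed = true;
	}
	if (td->changed) {
		events |= DISK_EVENT_MEDIA_CHANGE;
		td->changed = false;	/* report each change once */
	}
	return events;
}
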
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 55488fa..c9d8f6c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -55,7 +55,6 @@
 	u8		media_present;
 	u8		write_prot;
 	u8		protection_type;/* Data Integrity Field */
-	unsigned	previous_state : 1;
 	unsigned	ATO : 1;	/* state of disk ATO bit */
 	unsigned	WCE : 1;	/* state of disk WCE bit */
 	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index be6baf8..aefadc6 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -249,10 +249,6 @@
 		cd->device->changed = 0;
 	}
 
-	/* for backward compatibility */
-	if (events & DISK_EVENT_MEDIA_CHANGE)
-		sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
-				     GFP_KERNEL);
 	return events;
 }
 
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 7ac2bf5..2335eda 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -883,10 +883,10 @@
 
 static struct uart_driver s3c24xx_uart_drv = {
 	.owner		= THIS_MODULE,
-	.dev_name	= "s3c2410_serial",
+	.driver_name	= "s3c2410_serial",
 	.nr		= CONFIG_SERIAL_SAMSUNG_UARTS,
 	.cons		= S3C24XX_SERIAL_CONSOLE,
-	.driver_name	= S3C24XX_SERIAL_NAME,
+	.dev_name	= S3C24XX_SERIAL_NAME,
 	.major		= S3C24XX_SERIAL_MAJOR,
 	.minor		= S3C24XX_SERIAL_MINOR,
 };
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1906840..13bfa9d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -156,10 +156,10 @@
 	def_bool y if ARCH_MX31
 
 config SPI_IMX_VER_0_7
-	def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51
+	def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
 
 config SPI_IMX_VER_2_3
-	def_bool y if ARCH_MX51
+	def_bool y if ARCH_MX51 || ARCH_MX53
 
 config SPI_IMX
 	tristate "Freescale i.MX SPI controllers"
@@ -310,8 +310,8 @@
 
 config SPI_S3C64XX
 	tristate "Samsung S3C64XX series type SPI"
-	depends on ARCH_S3C64XX && EXPERIMENTAL
-	select S3C64XX_DMA
+	depends on (ARCH_S3C64XX || ARCH_S5P64X0)
+	select S3C64XX_DMA if ARCH_S3C64XX
 	help
 	  SPI driver for Samsung S3C64XX and newer SoCs.
 
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index a2a5921..71a1219 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1795,7 +1795,7 @@
 {
 	struct pl022_config_chip const *chip_info;
 	struct chip_data *chip;
-	struct ssp_clock_params clk_freq;
+	struct ssp_clock_params clk_freq = {0, };
 	int status = 0;
 	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
 	unsigned int bits = spi->bits_per_word;
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
index db35bd9..2fa012c 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/dw_spi_mmio.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -68,8 +69,8 @@
 	}
 
 	dwsmmio->clk = clk_get(&pdev->dev, NULL);
-	if (!dwsmmio->clk) {
-		ret = -ENODEV;
+	if (IS_ERR(dwsmmio->clk)) {
+		ret = PTR_ERR(dwsmmio->clk);
 		goto err_irq;
 	}
 	clk_enable(dwsmmio->clk);
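
The dw_spi_mmio fix above reflects the clk API convention: clk_get() reports failure as an ERR_PTR()-encoded errno, never NULL, so the removed NULL test could never fire. A sketch of the corrected idiom:

#include <linux/clk.h>
#include <linux/err.h>

/* clk_get() returns ERR_PTR(-errno) on failure; decode it with
 * IS_ERR()/PTR_ERR() rather than comparing against NULL. */
static int toy_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* propagate the real errno */

	clk_enable(clk);
	*out = clk;
	return 0;
}
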
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 9469564..1cf9d5f 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -743,6 +743,12 @@
 		.name = "imx51-ecspi",
 		.driver_data = SPI_IMX_VER_2_3,
 	}, {
+		.name = "imx53-cspi",
+		.driver_data = SPI_IMX_VER_0_7,
+	}, {
+		.name = "imx53-ecspi",
+		.driver_data = SPI_IMX_VER_2_3,
+	}, {
 		/* sentinel */
 	}
 };
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
index bb7df02..891e590 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi_tegra.c
@@ -513,7 +513,7 @@
 	}
 
 	tspi->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR_OR_NULL(tspi->clk)) {
+	if (IS_ERR(tspi->clk)) {
 		dev_err(&pdev->dev, "can not get clock\n");
 		ret = PTR_ERR(tspi->clk);
 		goto err2;
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 5a0985d..29884c0 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -420,6 +420,16 @@
 			bus->pcicore.dev = dev;
 #endif /* CONFIG_SSB_DRIVER_PCICORE */
 			break;
+		case SSB_DEV_ETHERNET:
+			if (bus->bustype == SSB_BUSTYPE_PCI) {
+				if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
+				    (bus->host_pci->device & 0xFF00) == 0x4300) {
+					/* This is a dangling ethernet core on a
+					 * wireless device. Ignore it. */
+					continue;
+				}
+			}
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/staging/autofs/dirhash.c b/drivers/staging/autofs/dirhash.c
index d3f42c8..a08bd73 100644
--- a/drivers/staging/autofs/dirhash.c
+++ b/drivers/staging/autofs/dirhash.c
@@ -88,14 +88,13 @@
 		}
 		path.mnt = mnt;
 		path_get(&path);
-		if (!follow_down(&path)) {
+		if (!follow_down_one(&path)) {
 			path_put(&path);
 			DPRINTK(("autofs: not expirable\
 			(not a mounted directory): %s\n", ent->name));
 			continue;
 		}
-		while (d_mountpoint(path.dentry) && follow_down(&path))
-			;
+		follow_down(&path, false);	/* TODO: need to check error */
 		umount_ok = may_umount(path.mnt);
 		path_put(&path);
 
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index 87a3a9b..f204d33 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -283,7 +283,7 @@
 		unsigned int, const char *, const struct qstr *);
 static int smb_delete_dentry(const struct dentry *);
 
-static const struct dentry_operations smbfs_dentry_operations =
+const struct dentry_operations smbfs_dentry_operations =
 {
 	.d_revalidate	= smb_lookup_validate,
 	.d_hash		= smb_hash_dentry,
@@ -291,7 +291,7 @@
 	.d_delete	= smb_delete_dentry,
 };
 
-static const struct dentry_operations smbfs_dentry_operations_case =
+const struct dentry_operations smbfs_dentry_operations_case =
 {
 	.d_revalidate	= smb_lookup_validate,
 	.d_delete	= smb_delete_dentry,
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 0000000..2fac3be
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,32 @@
+
+menuconfig TARGET_CORE
+	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+	depends on SCSI && BLOCK
+	select CONFIGFS_FS
+	default n
+	help
+	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
+	control path for target_core_mod.  This includes built-in TCM RAMDISK
+	subsystem logic for virtual LUN 0 access.
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	help
+	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+	access to Linux/Block devices using BIO.
+
+config TCM_FILEIO
+	tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+	help
+	Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+	access to Linux/VFS struct file or struct block_device.
+
+config TCM_PSCSI
+	tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+	help
+	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+	passthrough access to a Linux/SCSI device.
+
+endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 0000000..5cfd708
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,24 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
+
+target_core_mod-y		:= target_core_configfs.o \
+				   target_core_device.o \
+				   target_core_fabric_configfs.o \
+				   target_core_fabric_lib.o \
+				   target_core_hba.o \
+				   target_core_pr.o \
+				   target_core_alua.o \
+				   target_core_scdb.o \
+				   target_core_tmr.o \
+				   target_core_tpg.o \
+				   target_core_transport.o \
+				   target_core_cdb.o \
+				   target_core_ua.o \
+				   target_core_rd.o \
+				   target_core_mib.o
+
+obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
+obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 0000000..2c5fcfe
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,1991 @@
+/*******************************************************************************
+ * Filename:  target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
+ *
+ * Copyright (c) 2009-2010 Rising Tide Systems
+ * Copyright (c) 2009-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_ua.h"
+
+static int core_alua_check_transition(int state, int *primary);
+static int core_alua_set_tg_pt_secondary_state(
+		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+		struct se_port *port, int explict, int offline);
+
+/*
+ * REPORT_TARGET_PORT_GROUPS
+ *
+ * See spc4r17 section 6.27
+ */
+int core_emulate_report_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
+				    Target port group descriptor */
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		/*
+		 * PREF: Preferred target port bit, determine if this
+		 * bit should be set for port group.
+		 */
+		if (tg_pt_gp->tg_pt_gp_pref)
+			buf[off] = 0x80;
+		/*
+		 * Set the ASYMMETRIC ACCESS State
+		 */
+		buf[off++] |= (atomic_read(
+			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+		/*
+		 * Set supported ASYMMETRIC ACCESS State bits
+		 */
+		buf[off] = 0x80; /* T_SUP */
+		buf[off] |= 0x40; /* O_SUP */
+		buf[off] |= 0x8; /* U_SUP */
+		buf[off] |= 0x4; /* S_SUP */
+		buf[off] |= 0x2; /* AN_SUP */
+		buf[off++] |= 0x1; /* AO_SUP */
+		/*
+		 * TARGET PORT GROUP
+		 */
+		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+
+		off++; /* Skip over Reserved */
+		/*
+		 * STATUS CODE
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+		/*
+		 * Vendor Specific field
+		 */
+		buf[off++] = 0x00;
+		/*
+		 * TARGET PORT COUNT
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+		rd_len += 8;
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+			port = tg_pt_gp_mem->tg_pt;
+			/*
+			 * Start Target Port descriptor format
+			 *
+			 * See spc4r17 section 6.2.7 Table 247
+			 */
+			off += 2; /* Skip over Obsolete */
+			/*
+			 * Set RELATIVE TARGET PORT IDENTIFIER
+			 */
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+			rd_len += 4;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	/*
+	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+	 */
+	buf[0] = ((rd_len >> 24) & 0xff);
+	buf[1] = ((rd_len >> 16) & 0xff);
+	buf[2] = ((rd_len >> 8) & 0xff);
+	buf[3] = (rd_len & 0xff);
+
+	return 0;
+}
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
+ */
+int core_emulate_set_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
+	struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+	u32 len = 4; /* Skip over RESERVED area in header */
+	int alua_access_state, primary = 0, rc;
+	u16 tg_pt_id, rtpi;
+
+	if (!(l_port))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	/*
+	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+	 * for the local tg_pt_gp.
+	 */
+	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
+	if (!(l_tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+	if (!(l_tg_pt_gp)) {
+		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
+	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	if (!(rc)) {
+		printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+				" while TPGS_EXPLICT_ALUA is disabled\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
+	while (len < cmd->data_length) {
+		alua_access_state = (ptr[0] & 0x0f);
+		/*
+		 * Check the received ALUA access state, and determine if
+		 * the state is a primary or secondary target port asymmetric
+		 * access state.
+		 */
+		rc = core_alua_check_transition(alua_access_state, &primary);
+		if (rc != 0) {
+			/*
+			 * If the SET TARGET PORT GROUPS attempts to establish
+			 * an invalid combination of target port asymmetric
+			 * access states or attempts to establish an
+			 * unsupported target port asymmetric access state,
+			 * then the command shall be terminated with CHECK
+			 * CONDITION status, with the sense key set to ILLEGAL
+			 * REQUEST, and the additional sense code set to INVALID
+			 * FIELD IN PARAMETER LIST.
+			 */
+			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+		rc = -1;
+		/*
+		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
+		 * specifies a primary target port asymmetric access state,
+		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
+		 * a primary target port group for which the primary target
+		 * port asymmetric access state shall be changed. If the
+		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
+		 * port asymmetric access state, then the TARGET PORT GROUP OR
+		 * TARGET PORT field specifies the relative target port
+		 * identifier (see 3.1.120) of the target port for which the
+		 * secondary target port asymmetric access state shall be
+		 * changed.
+		 */
+		if (primary) {
+			tg_pt_id = ((ptr[2] << 8) & 0xff00);
+			tg_pt_id |= (ptr[3] & 0xff);
+			/*
+			 * Locate the matching target port group ID from
+			 * the global tg_pt_gp list
+			 */
+			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			list_for_each_entry(tg_pt_gp,
+					&T10_ALUA(su_dev)->tg_pt_gps_list,
+					tg_pt_gp_list) {
+				if (!(tg_pt_gp->tg_pt_gp_valid_id))
+					continue;
+
+				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
+					continue;
+
+				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_inc();
+				spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+				rc = core_alua_do_port_transition(tg_pt_gp,
+						dev, l_port, nacl,
+						alua_access_state, 1);
+
+				spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_dec();
+				break;
+			}
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			/*
+			 * If no matching target port group ID can be located,
+			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0)
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		} else {
+			/*
+			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+			 * the Target Port in question for the incoming
+			 * SET_TARGET_PORT_GROUPS op.
+			 */
+			rtpi = ((ptr[2] << 8) & 0xff00);
+			rtpi |= (ptr[3] & 0xff);
+			/*
+			 * Locate the matching relative target port identifier
+			 * for the struct se_device storage object.
+			 */
+			spin_lock(&dev->se_port_lock);
+			list_for_each_entry(port, &dev->dev_sep_list,
+							sep_list) {
+				if (port->sep_rtpi != rtpi)
+					continue;
+
+				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+				spin_unlock(&dev->se_port_lock);
+
+				rc = core_alua_set_tg_pt_secondary_state(
+						tg_pt_gp_mem, port, 1, 1);
+
+				spin_lock(&dev->se_port_lock);
+				break;
+			}
+			spin_unlock(&dev->se_port_lock);
+			/*
+			 * If no matching relative target port identifier can
+			 * be located, throw an exception with ASCQ:
+			 * INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0)
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+
+		ptr += 4;
+		len += 4;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_nonoptimized(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	int nonop_delay_msecs,
+	u8 *alua_ascq)
+{
+	/*
+	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
+	 * later to determine if processing of this cmd needs to be
+	 * temporarily delayed for the Active/NonOptimized primary access state.
+	 */
+	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
+	cmd->alua_nonop_delay = nonop_delay_msecs;
+	return 0;
+}
+
+static inline int core_alua_state_standby(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
+	 * spc4r17 section 5.9.2.4.4
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case LOG_SELECT:
+	case LOG_SENSE:
+	case MODE_SELECT:
+	case MODE_SENSE:
+	case REPORT_LUNS:
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_unavailable(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+	 * spc4r17 section 5.9.2.4.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_transition(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+	 * spc4r17 section 5.9.2.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
+ * in transport_cmd_sequencer().  This function is assigned to
+ * struct t10_alua *->alua_state_check() in core_setup_alua()
+ */
+static int core_alua_state_check_nop(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
+ * This function is assigned to struct t10_alua *->alua_state_check() in
+ * core_setup_alua()
+ *
+ * Also, this function can return three different return codes to
+ * signal transport_generic_cmd_sequencer()
+ *
+ * return 1: Used to signal LUN not accessible, and check condition/not ready
+ * return 0: Used to signal success
+ * return -1: Used to signal failure, and an invalid cdb field
+ */
+static int core_alua_state_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_port *port = lun->lun_sep;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	int out_alua_state, nonop_delay_msecs;
+
+	if (!(port))
+		return 0;
+	/*
+	 * First, check for a struct se_port specific secondary ALUA target port
+	 * access state: OFFLINE
+	 */
+	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+		printk(KERN_INFO "ALUA: Got secondary offline status for local"
+				" target port\n");
+		return 1;
+	}
+	/*
+	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
+	 * ALUA target port group, to obtain the current ALUA access state.
+	 * Otherwise look for the underlying struct se_device association with
+	 * an ALUA logical unit group.
+	 */
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+	 * statement so the compiler knows explicitly to check this case first.
+	 * For the Optimized ALUA access state case, we want to process the
+	 * incoming fabric cmd ASAP.
+	 */
+	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+		return 0;
+
+	switch (out_alua_state) {
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return core_alua_state_nonoptimized(cmd, cdb,
+					nonop_delay_msecs, alua_ascq);
+	case ALUA_ACCESS_STATE_STANDBY:
+		return core_alua_state_standby(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return core_alua_state_transition(cmd, cdb, alua_ascq);
+	/*
+	 * OFFLINE is a secondary ALUA target port group access state, that is
+	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+	 */
+	case ALUA_ACCESS_STATE_OFFLINE:
+	default:
+		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+				out_alua_state);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Check implicit and explicit ALUA state change request.
+ */
+static int core_alua_check_transition(int state, int *primary)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+	case ALUA_ACCESS_STATE_STANDBY:
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		/*
+		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+		 * defined as primary target port asymmetric access states.
+		 */
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_OFFLINE:
+		/*
+		 * OFFLINE state is defined as a secondary target port
+		 * asymmetric access state.
+		 */
+		*primary = 0;
+		break;
+	default:
+		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
+		return -1;
+	}
+
+	return 0;
+}
+
+static char *core_alua_dump_state(int state)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+		return "Active/Optimized";
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_STANDBY:
+		return "Standby";
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return "Unavailable";
+	case ALUA_ACCESS_STATE_OFFLINE:
+		return "Offline";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+char *core_alua_dump_status(int status)
+{
+	switch (status) {
+	case ALUA_STATUS_NONE:
+		return "None";
+	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
+		return "Altered by Explicit STPG";
+	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
+		return "Altered by Implicit ALUA";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+/*
+ * Used by fabric modules to determine when we need to delay processing
+ * for the Active/NonOptimized paths.
+ */
+int core_alua_check_nonop_delay(
+	struct se_cmd *cmd)
+{
+	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
+		return 0;
+	if (in_interrupt())
+		return 0;
+	/*
+	 * The ALUA Active/NonOptimized access state delay can be disabled
+	 * via configfs with a value of zero
+	 */
+	if (!(cmd->alua_nonop_delay))
+		return 0;
+	/*
+	 * struct se_cmd->alua_nonop_delay gets set to a target port group
+	 * defined interval in core_alua_state_nonoptimized()
+	 */
+	msleep_interruptible(cmd->alua_nonop_delay);
+	return 0;
+}
+EXPORT_SYMBOL(core_alua_check_nonop_delay);
+
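+/*
+ * Hypothetical call site sketch (not taken from an in-tree fabric module,
+ * so the function name below is illustrative only): a fabric driver would
+ * call this from process context before handing the command to the
+ * backend, so that the Active/NonOptimized delay is free to sleep:
+ *
+ *	static int example_fabric_queue_cmd(struct se_cmd *cmd)
+ *	{
+ *		core_alua_check_nonop_delay(cmd);
+ *		... continue normal command submission ...
+ *	}
+ */
+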
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held
+ */
+static int core_alua_write_tpg_metadata(
+	const char *path,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	mm_segment_t old_fs;
+	struct file *file;
+	struct iovec iov[1];
+	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+			path);
+		return -ENODEV;
+	}
+
+	iov[0].iov_base = &md_buf[0];
+	iov[0].iov_len = md_buf_len;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -EIO;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ */
+static int core_alua_update_tpg_primary_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int primary_state,
+	unsigned char *md_buf)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_wwn *wwn = &su_dev->t10_wwn;
+	char path[ALUA_METADATA_PATH_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+			"tg_pt_gp_id=%hu\n"
+			"alua_access_state=0x%02x\n"
+			"alua_access_status=0x%02x\n",
+			tg_pt_gp->tg_pt_gp_id, primary_state,
+			tg_pt_gp->tg_pt_gp_alua_access_status);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN,
+		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
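+/*
+ * Illustrative contents of the primary metadata file written above, at
+ * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp name> (the values shown
+ * are made up, per the snprintf() format in the function):
+ *
+ *	tg_pt_gp_id=1
+ *	alua_access_state=0x01
+ *	alua_access_status=0x01
+ */
+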
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	struct se_port *l_port,
+	struct se_node_acl *nacl,
+	unsigned char *md_buf,
+	int new_state,
+	int explict)
+{
+	struct se_dev_entry *se_deve;
+	struct se_lun_acl *lacl;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp_member *mem;
+	int old_state = 0;
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+			ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
+				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+		port = mem->tg_pt;
+		/*
+		 * After an implicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition for the initiator port associated with every I_T
+		 * nexus with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED.
+		 *
+		 * After an explicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED for the initiator port associated with
+		 * every I_T nexus other than the I_T nexus on which the SET
+		 * TARGET PORT GROUPS command was received.
+		 */
+		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(se_deve, &port->sep_alua_list,
+					alua_port_list) {
+			lacl = se_deve->se_lun_acl;
+			/*
+			 * se_deve->se_lun_acl pointer may be NULL for an
+			 * entry created without explicit Node+MappedLUN ACLs
+			 */
+			if (!(lacl))
+				continue;
+
+			if (explict &&
+			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
+			   (l_port != NULL) && (l_port == port))
+				continue;
+
+			core_scsi3_ua_allocate(lacl->se_lun_nacl,
+				se_deve->mapped_lun, 0x2A,
+				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	/*
+	 * Update the ALUA metadata buf that has been allocated in
+	 * core_alua_do_port_transition(), this metadata will be written
+	 * to struct file.
+	 *
+	 * Note that there is the case where we do not want to update the
+	 * metadata when the saved metadata is being parsed in userspace
+	 * when setting the existing port access state and access status.
+	 *
+	 * Also note that the failure to write out the ALUA metadata to
+	 * struct file does NOT affect the actual ALUA transition.
+	 */
+	if (tg_pt_gp->tg_pt_gp_write_metadata) {
+		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp,
+					new_state, md_buf);
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+	}
+	/*
+	 * Set the current primary ALUA access state to the requested new state
+	 */
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+
+	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" from primary access state %s to %s\n", (explict) ? "explicit" :
+		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
+		core_alua_dump_state(new_state));
+
+	return 0;
+}
+
+int core_alua_do_port_transition(
+	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
+	struct se_device *l_dev,
+	struct se_port *l_port,
+	struct se_node_acl *l_nacl,
+	int new_state,
+	int explict)
+{
+	struct se_device *dev;
+	struct se_port *port;
+	struct se_subsystem_dev *su_dev;
+	struct se_node_acl *nacl;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	int primary;
+
+	if (core_alua_check_transition(new_state, &primary) != 0)
+		return -EINVAL;
+
+	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
+	if (!(md_buf)) {
+		printk(KERN_ERR "Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
+
+	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
+	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = local_lu_gp_mem->lu_gp;
+	atomic_inc(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
+	/*
+	 * For storage objects that are members of the 'default_lu_gp',
+	 * we only do the transition on the passed *l_tg_pt_gp, and not
+	 * on all of the matching target port group IDs in default_lu_gp.
+	 */
+	if (!(lu_gp->lu_gp_id)) {
+		/*
+		 * core_alua_do_transition_tg_pt() will always return
+		 * success.
+		 */
+		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
+					md_buf, new_state, explict);
+		atomic_dec(&lu_gp->lu_gp_ref_cnt);
+		smp_mb__after_atomic_dec();
+		kfree(md_buf);
+		return 0;
+	}
+	/*
+	 * For all other LU groups aside from 'default_lu_gp', walk all of
+	 * the associated storage objects looking for a matching target port
+	 * group ID from the local target port group.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
+				lu_gp_mem_list) {
+
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&lu_gp->lu_gp_lock);
+
+		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		list_for_each_entry(tg_pt_gp,
+				&T10_ALUA(su_dev)->tg_pt_gps_list,
+				tg_pt_gp_list) {
+
+			if (!(tg_pt_gp->tg_pt_gp_valid_id))
+				continue;
+			/*
+			 * If the target port asymmetric access state is
+			 * changed for any target port group accessible via
+			 * a logical unit within a LU group, the target port
+			 * group asymmetric access states for the same
+			 * target port group accessible via other logical units
+			 * in that LU group will also change.
+			 */
+			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
+				continue;
+
+			if (l_tg_pt_gp == tg_pt_gp) {
+				port = l_port;
+				nacl = l_nacl;
+			} else {
+				port = NULL;
+				nacl = NULL;
+			}
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			/*
+			 * core_alua_do_transition_tg_pt() will always return
+			 * success.
+			 */
+			core_alua_do_transition_tg_pt(tg_pt_gp, port,
+					nacl, md_buf, new_state, explict);
+
+			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_dec();
+		}
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+		" Group IDs: %hu %s transition to primary state: %s\n",
+		config_item_name(&lu_gp->lu_gp_group.cg_item),
+		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explicit" : "implicit",
+		core_alua_dump_state(new_state));
+
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_dec();
+	kfree(md_buf);
+	return 0;
+}
+
+/*
+ * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
+ */
+static int core_alua_update_tpg_secondary_metadata(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	struct se_portal_group *se_tpg = port->sep_tpg;
+	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
+			TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+
+	if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
+				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+			"alua_tg_pt_status=0x%02x\n",
+			atomic_read(&port->sep_tg_pt_secondary_offline),
+			port->sep_tg_pt_secondary_stat);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+			TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+			port->sep_lun->unpacked_lun);
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
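+/*
+ * Illustrative secondary metadata file produced above (values made up,
+ * per the snprintf() format), written to
+ * /var/target/alua/<fabric>/<wwn>[+<tpgt>]/lun_<unpacked_lun>:
+ *
+ *	alua_tg_pt_offline=1
+ *	alua_tg_pt_status=0x02
+ */
+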
+static int core_alua_set_tg_pt_secondary_state(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	int explict,
+	int offline)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	u32 md_buf_len;
+	int trans_delay_msecs;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (!(tg_pt_gp)) {
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_ERR "Unable to complete secondary state"
+				" transition\n");
+		return -1;
+	}
+	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
+	/*
+	 * Set the secondary ALUA target port access state to OFFLINE
+	 * or release the previously set secondary state for struct se_port
+	 */
+	if (offline)
+		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+	else
+		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+
+	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
+	port->sep_tg_pt_secondary_stat = (explict) ?
+			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+
+	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" to secondary access state: %s\n", (explict) ? "explicit" :
+		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
+
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Do the optional transition delay after we set the secondary
+	 * ALUA access state.
+	 */
+	if (trans_delay_msecs != 0)
+		msleep_interruptible(trans_delay_msecs);
+	/*
+	 * See if we need to update the ALUA fabric port metadata for
+	 * secondary state and status
+	 */
+	if (port->sep_tg_pt_secondary_write_md) {
+		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
+		if (!(md_buf)) {
+			printk(KERN_ERR "Unable to allocate md_buf for"
+				" secondary ALUA access metadata\n");
+			return -1;
+		}
+		mutex_lock(&port->sep_tg_pt_md_mutex);
+		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
+				md_buf, md_buf_len);
+		mutex_unlock(&port->sep_tg_pt_md_mutex);
+
+		kfree(md_buf);
+	}
+
+	return 0;
+}
+
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+	struct t10_alua_lu_gp *lu_gp;
+
+	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+	if (!(lu_gp)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+	spin_lock_init(&lu_gp->lu_gp_lock);
+	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+	if (def_group) {
+		lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+		lu_gp->lu_gp_valid_id = 1;
+		se_global->alua_lu_gps_count++;
+	}
+
+	return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+	struct t10_alua_lu_gp *lu_gp_tmp;
+	u16 lu_gp_id_tmp;
+	/*
+	 * The lu_gp->lu_gp_id may only be set once.
+	 */
+	if (lu_gp->lu_gp_valid_id) {
+		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+			" ignoring request\n");
+		return -1;
+	}
+
+	spin_lock(&se_global->lu_gps_lock);
+	if (se_global->alua_lu_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+				" 0x0000ffff reached\n");
+		spin_unlock(&se_global->lu_gps_lock);
+		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+		return -1;
+	}
+again:
+	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+				se_global->alua_lu_gps_counter++;
+
+	list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+			if (!(lu_gp_id))
+				goto again;
+
+			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+				" already exists, ignoring request\n",
+				lu_gp_id);
+			spin_unlock(&se_global->lu_gps_lock);
+			return -1;
+		}
+	}
+
+	lu_gp->lu_gp_id = lu_gp_id_tmp;
+	lu_gp->lu_gp_valid_id = 1;
+	list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
+	se_global->alua_lu_gps_count++;
+	spin_unlock(&se_global->lu_gps_lock);
+
+	return 0;
+}
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+	if (!(lu_gp_mem)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+	lu_gp_mem->lu_gp_mem_dev = dev;
+	dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+	return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has
+	 * already been called from target_core_alua_drop_lu_gp().
+	 *
+	 * Here, we remove the *lu_gp from the global list so that
+	 * no associations can be made while we are releasing
+	 * struct t10_alua_lu_gp.
+	 */
+	spin_lock(&se_global->lu_gps_lock);
+	atomic_set(&lu_gp->lu_gp_shutdown, 1);
+	list_del(&lu_gp->lu_gp_list);
+	se_global->alua_lu_gps_count--;
+	spin_unlock(&se_global->lu_gps_lock);
+	/*
+	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
+	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
+	 * released with core_alua_put_lu_gp_from_name()
+	 */
+	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_lu_gp * from all associated
+	 * struct se_device.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
+				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		/*
+		 * lu_gp_mem is associated with a single
+		 * struct se_device->dev_alua_lu_gp_mem, and is released when
+		 * struct se_device is released via core_alua_free_lu_gp_mem().
+		 *
+		 * If the passed lu_gp does NOT match the default_lu_gp, assume
+		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
+		 */
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		if (lu_gp != se_global->default_lu_gp)
+			__core_alua_attach_lu_gp_mem(lu_gp_mem,
+					se_global->default_lu_gp);
+		else
+			lu_gp_mem->lu_gp = NULL;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+}
+
+void core_alua_free_lu_gp_mem(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem))
+		return;
+
+	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		spin_lock(&lu_gp->lu_gp_lock);
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		lu_gp_mem->lu_gp = NULL;
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
+}
+
+struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_item *ci;
+
+	spin_lock(&se_global->lu_gps_lock);
+	list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
+		if (!(lu_gp->lu_gp_valid_id))
+			continue;
+		ci = &lu_gp->lu_gp_group.cg_item;
+		if (!(strcmp(config_item_name(ci), name))) {
+			atomic_inc(&lu_gp->lu_gp_ref_cnt);
+			spin_unlock(&se_global->lu_gps_lock);
+			return lu_gp;
+		}
+	}
+	spin_unlock(&se_global->lu_gps_lock);
+
+	return NULL;
+}
+
+void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&se_global->lu_gps_lock);
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	spin_unlock(&se_global->lu_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
+ */
+void __core_alua_attach_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	lu_gp_mem->lu_gp = lu_gp;
+	lu_gp_mem->lu_gp_assoc = 1;
+	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
+	lu_gp->lu_gp_members++;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
+ */
+void __core_alua_drop_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_del(&lu_gp_mem->lu_gp_mem_list);
+	lu_gp_mem->lu_gp = NULL;
+	lu_gp_mem->lu_gp_assoc = 0;
+	lu_gp->lu_gp_members--;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+	struct se_subsystem_dev *su_dev,
+	const char *name,
+	int def_group)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
+	if (!(tg_pt_gp)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
+	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+	/*
+	 * Enable both explicit and implicit ALUA support by default
+	 */
+	tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+	/*
+	 * Set the default Active/NonOptimized Delay in milliseconds
+	 */
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+
+	if (def_group) {
+		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		tg_pt_gp->tg_pt_gp_id =
+				T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+		tg_pt_gp->tg_pt_gp_valid_id = 1;
+		T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			      &T10_ALUA(su_dev)->tg_pt_gps_list);
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	}
+
+	return tg_pt_gp;
+}
+
+int core_alua_set_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u16 tg_pt_gp_id)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
+	u16 tg_pt_gp_id_tmp;
+	/*
+	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
+	 */
+	if (tg_pt_gp->tg_pt_gp_valid_id) {
+		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+			" ignoring request\n");
+		return -1;
+	}
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+			" 0x0000ffff reached\n");
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+		return -1;
+	}
+again:
+	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
+			T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+
+	list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
+			if (!(tg_pt_gp_id))
+				goto again;
+
+			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+				" exists, ignoring request\n", tg_pt_gp_id);
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			return -1;
+		}
+	}
+
+	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
+	tg_pt_gp->tg_pt_gp_valid_id = 1;
+	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			&T10_ALUA(su_dev)->tg_pt_gps_list);
+	T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+	return 0;
+}
+
+struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+	struct se_port *port)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
+				GFP_KERNEL);
+	if (!(tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
+
+	tg_pt_gp_mem->tg_pt = port;
+	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
+	atomic_set(&port->sep_tg_pt_gp_active, 1);
+
+	return tg_pt_gp_mem;
+}
+
+void core_alua_free_tg_pt_gp(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has already
+	 * been called from target_core_alua_drop_tg_pt_gp().
+	 *
+	 * Here we remove *tg_pt_gp from the global list so that
+	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
+	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
+	 */
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_del(&tg_pt_gp->tg_pt_gp_list);
+	T10_ALUA(su_dev)->alua_tg_pt_gps_count--;
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	/*
+	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
+	 * core_alua_get_tg_pt_gp_by_name() in
+	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
+	 * to be released with core_alua_put_tg_pt_gp_from_name().
+	 */
+	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_tg_pt_gp from all associated
+	 * struct se_port.
+	 */
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
+			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		/*
+		 * tg_pt_gp_mem is associated with a single
+		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
+		 * core_alua_free_tg_pt_gp_mem().
+		 *
+		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
+		 * assume we want to re-associate a given tg_pt_gp_mem with
+		 * default_tg_pt_gp.
+		 */
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					T10_ALUA(su_dev)->default_tg_pt_gp);
+		} else {
+			tg_pt_gp_mem->tg_pt_gp = NULL;
+		}
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+}
+
+void core_alua_free_tg_pt_gp_mem(struct se_port *port)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem))
+		return;
+
+	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		tg_pt_gp_mem->tg_pt_gp = NULL;
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
+}
+
+static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
+	struct se_subsystem_dev *su_dev,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct config_item *ci;
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (!(tg_pt_gp->tg_pt_gp_valid_id))
+			continue;
+		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		if (!(strcmp(config_item_name(ci), name))) {
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			return tg_pt_gp;
+		}
+	}
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+	return NULL;
+}
+
+static void core_alua_put_tg_pt_gp_from_name(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+void __core_alua_attach_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
+	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
+			&tg_pt_gp->tg_pt_gp_mem_list);
+	tg_pt_gp->tg_pt_gp_members++;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+static void __core_alua_drop_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	tg_pt_gp_mem->tg_pt_gp = NULL;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+	tg_pt_gp->tg_pt_gp_members--;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct config_item *tg_pt_ci;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem))
+		return len;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
+			" %hu\nTG Port Primary Access State: %s\nTG Port "
+			"Primary Access Status: %s\nTG Port Secondary Access"
+			" State: %s\nTG Port Secondary Access Status: %s\n",
+			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
+			core_alua_dump_state(atomic_read(
+					&tg_pt_gp->tg_pt_gp_alua_access_state)),
+			core_alua_dump_status(
+				tg_pt_gp->tg_pt_gp_alua_access_status),
+			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+			"Offline" : "None",
+			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	return len;
+}
+
+ssize_t core_alua_store_tg_pt_gp_info(
+	struct se_port *port,
+	const char *page,
+	size_t count)
+{
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+	int move = 0;
+
+	tpg = port->sep_tpg;
+	lun = port->sep_lun;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
+			" %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		return -EINVAL;
+	}
+
+	if (count > TG_PT_GROUP_NAME_BUF) {
+		printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA target port group alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
+		 * struct t10_alua_tg_pt_gp.  This reference is released with
+		 * core_alua_put_tg_pt_gp_from_name() below.
+		 */
+		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+					strstrip(buf));
+		if (!(tg_pt_gp_new))
+			return -ENODEV;
+	}
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem)) {
+		if (tg_pt_gp_new)
+			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+		printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		/*
+		 * Clearing an existing tg_pt_gp association, and replacing
+		 * with the default_tg_pt_gp.
+		 */
+		if (!(tg_pt_gp_new)) {
+			printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
+				" alua/%s, ID: %hu back to"
+				" default_tg_pt_gp\n",
+				TPG_TFO(tpg)->tpg_get_wwn(tpg),
+				TPG_TFO(tpg)->tpg_get_tag(tpg),
+				config_item_name(&lun->lun_group.cg_item),
+				config_item_name(
+					&tg_pt_gp->tg_pt_gp_group.cg_item),
+				tg_pt_gp->tg_pt_gp_id);
+
+			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					T10_ALUA(su_dev)->default_tg_pt_gp);
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
+		 */
+		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+		move = 1;
+	}
+	/*
+	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
+	 */
+	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
+		"Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		config_item_name(&lun->lun_group.cg_item),
+		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
+		tg_pt_gp_new->tg_pt_gp_id);
+
+	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+	return count;
+}
+
+ssize_t core_alua_show_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
+	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
+		return sprintf(page, "Implicit and Explicit\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
+		return sprintf(page, "Implicit\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
+		return sprintf(page, "Explicit\n");
+	else
+		return sprintf(page, "None\n");
+}
+
+ssize_t core_alua_store_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_access_type\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
+		printk(KERN_ERR "Illegal value for alua_access_type:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	if (tmp == 3)
+		tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+	else if (tmp == 2)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+	else if (tmp == 1)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+	else
+		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
+
+	return count;
+}
+
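+/*
+ * Sketch of the intended configfs usage for the attribute backed by the
+ * show/store pair above; the exact attribute path is wired up in
+ * target_core_configfs.c and is assumed here, not guaranteed:
+ *
+ *	# 0 = None, 1 = Implicit, 2 = Explicit, 3 = Implicit and Explicit
+ *	echo 3 > /sys/kernel/config/target/core/$HBA/$DEV/alua/$TG_PT_GP/alua_access_type
+ */
+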
+ssize_t core_alua_show_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
+}
+
+ssize_t core_alua_store_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
+		printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_NONOP_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+}
+
+ssize_t core_alua_store_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
+		printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_TRANS_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
+}
+
+ssize_t core_alua_store_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
+{
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return sprintf(page, "%d\n",
+		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+}
+
+ssize_t core_alua_store_offline_bit(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned long tmp;
+	int ret;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
+			lun->lun_sep, 0, (int)tmp);
+	if (ret < 0)
+		return -EINVAL;
+
+	return count;
+}
+
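+/*
+ * Illustrative configfs usage for the secondary OFFLINE bit handled by the
+ * show/store pair above (the fabric side LUN path is an assumption based
+ * on the configfs layout, not a guaranteed interface):
+ *
+ *	echo 1 > /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_offline
+ */
+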
+ssize_t core_alua_show_secondary_status(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+}
+
+ssize_t core_alua_store_secondary_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+		return -EINVAL;
+	}
+	if ((tmp != ALUA_STATUS_NONE) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_write_metadata(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+			lun->lun_sep->sep_tg_pt_secondary_write_md);
+}
+
+ssize_t core_alua_store_secondary_write_metadata(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+
+	return count;
+}
+
+int core_setup_alua(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
+	 * of the underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but emulate SCSI logic themselves.
+	 */
+	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+		alua->alua_type = SPC_ALUA_PASSTHROUGH;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+			" emulation\n", TRANSPORT(dev)->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated ALUA.
+	 */
+	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+		printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
+			" device\n", TRANSPORT(dev)->name);
+		/*
+		 * Associate this struct se_device with the default ALUA
+		 * LU Group.
+		 */
+		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
+		if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
+			return -1;
+
+		alua->alua_type = SPC3_ALUA_EMULATED;
+		alua->alua_state_check = &core_alua_state_check;
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		__core_alua_attach_lu_gp_mem(lu_gp_mem,
+				se_global->default_lu_gp);
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+			" core/alua/lu_gps/default_lu_gp\n",
+			TRANSPORT(dev)->name);
+	} else {
+		alua->alua_type = SPC2_ALUA_DISABLED;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
+			" device\n", TRANSPORT(dev)->name);
+	}
+
+	return 0;
+}
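+
+/*
+ * Hypothetical backend call site sketch (illustrative only; the actual
+ * subsystem plugin code lives outside this file): a backend would invoke
+ * this once while configuring a struct se_device, e.g.:
+ *
+ *	if (core_setup_alua(dev, force_pt) < 0)
+ *		goto out_free_resources;
+ */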
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 0000000..c86f97a
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
+#ifndef TARGET_CORE_ALUA_H
+#define TARGET_CORE_ALUA_H
+
+/*
+ * INQUIRY response data, TPGS Field
+ *
+ * from spc4r17 section 6.4.2 Table 135
+ */
+#define TPGS_NO_ALUA				0x00
+#define TPGS_IMPLICT_ALUA			0x10
+#define TPGS_EXPLICT_ALUA			0x20
+
+/*
+ * ASYMMETRIC ACCESS STATE field
+ *
+ * from spc4r17 section 6.27 Table 245
+ */
+#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED	0x0
+#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
+#define ALUA_ACCESS_STATE_STANDBY		0x2
+#define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_OFFLINE		0xe
+#define ALUA_ACCESS_STATE_TRANSITION		0xf
+
+/*
+ * REPORT_TARGET_PORT_GROUP STATUS CODE
+ *
+ * from spc4r17 section 6.27 Table 246
+ */
+#define ALUA_STATUS_NONE				0x00
+#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG		0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA		0x02
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_04H_ALUA_STATE_TRANSITION			0x0a
+#define ASCQ_04H_ALUA_TG_PT_STANDBY			0x0b
+#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE			0x0c
+#define ASCQ_04H_ALUA_OFFLINE				0x12
+
+/*
+ * Used as the default for Active/NonOptimized delay (in milliseconds)
+ * This can also be changed via configfs on a per target port group basis.
+ */
+#define ALUA_DEFAULT_NONOP_DELAY_MSECS			100
+#define ALUA_MAX_NONOP_DELAY_MSECS			10000 /* 10 seconds */
+/*
+ * Used for the implicit and explicit ALUA transitional delay, which is
+ * disabled by default and is intended for debugging client side ALUA code.
+ */
+#define ALUA_DEFAULT_TRANS_DELAY_MSECS			0
+#define ALUA_MAX_TRANS_DELAY_MSECS			30000 /* 30 seconds */
+/*
+ * Used by core_alua_update_tpg_primary_metadata() and
+ * core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_METADATA_PATH_LEN				512
+/*
+ * Used by core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_SECONDARY_METADATA_WWN_LEN			256
+
+extern struct kmem_cache *t10_alua_lu_gp_cache;
+extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+extern int core_emulate_report_target_port_groups(struct se_cmd *);
+extern int core_emulate_set_target_port_groups(struct se_cmd *);
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
+				struct se_device *, struct se_port *,
+				struct se_node_acl *, int, int);
+extern char *core_alua_dump_status(int);
+extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
+extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
+extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
+extern void core_alua_free_lu_gp_mem(struct se_device *);
+extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
+extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
+extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void core_alua_drop_lu_gp_dev(struct se_device *);
+extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+			struct se_subsystem_dev *, const char *, int);
+extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
+extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+					struct se_port *);
+extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
+extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
+extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
+					struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+						size_t);
+extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
+extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+						char *);
+extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
+extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
+					size_t);
+extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
+extern ssize_t core_alua_store_secondary_status(struct se_lun *,
+					const char *, size_t);
+extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
+					char *);
+extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
+					const char *, size_t);
+extern int core_setup_alua(struct se_device *, int);
+
+#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
new file mode 100644
index 0000000..366080b
--- /dev/null
+++ b/drivers/target/target_core_cdb.c
@@ -0,0 +1,1131 @@
+/*
+ * CDB emulation for non-READ/WRITE commands.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include "target_core_ua.h"
+
+static void
+target_fill_alua_data(struct se_port *port, unsigned char *buf)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	/*
+	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+	 */
+	buf[5]	= 0x80;
+
+	/*
+	 * Set TPGS field for explicit and/or implicit ALUA access type
+	 * and operation.
+	 *
+	 * See spc4r17 section 6.4.2 Table 135
+	 */
+	if (!port)
+		return;
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem)
+		return;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (tg_pt_gp)
+		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+}
+
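+/*
+ * Standard INQUIRY data (EVPD=0).  Byte layout used below, following
+ * spc4r17 section 6.4.2: byte 0 PERIPHERAL DEVICE TYPE, byte 1 RMB for
+ * TYPE_TAPE, byte 2 VERSION, byte 4 ADDITIONAL LENGTH, byte 5 SCCS/TPGS,
+ * byte 7 Sync+CmdQue flags, bytes 8-15 vendor, 16-31 model and
+ * 32-35 revision strings.
+ */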
+static int
+target_emulate_inquiry_std(struct se_cmd *cmd)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+
+	/*
+	 * Make sure we at least have 6 bytes of INQUIRY response
+	 * payload going back for EVPD=0
+	 */
+	if (cmd->data_length < 6) {
+		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+			" too small for EVPD=0\n", cmd->data_length);
+		return -1;
+	}
+
+	buf[0] = dev->transport->get_device_type(dev);
+	if (buf[0] == TYPE_TAPE)
+		buf[1] = 0x80;
+	buf[2] = dev->transport->get_device_rev(dev);
+
+	/*
+	 * Enable SCCS and TPGS fields for Emulated ALUA
+	 */
+	if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+		target_fill_alua_data(lun->lun_sep, buf);
+
+	if (cmd->data_length < 8) {
+		buf[4] = 1; /* Set additional length to 1 */
+		return 0;
+	}
+
+	buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
+
+	/*
+	 * Do not include vendor, product, or revision info in the INQUIRY
+	 * response payload for CDBs with a small allocation length.
+	 */
+	if (cmd->data_length < 36) {
+		buf[4] = 3; /* Set additional length to 3 */
+		return 0;
+	}
+
+	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
+	snprintf((unsigned char *)&buf[16], 16, "%s",
+		 &DEV_T10_WWN(dev)->model[0]);
+	snprintf((unsigned char *)&buf[32], 4, "%s",
+		 &DEV_T10_WWN(dev)->revision[0]);
+	buf[4] = 31; /* Set additional length to 31 */
+	return 0;
+}
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+	buf[1] = 0x00;
+	if (cmd->data_length < 8)
+		return 0;
+
+	buf[4] = 0x0;
+	/*
+	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
+	 * Registered Extended LUN WWN has been set via ConfigFS
+	 * during device creation/restart.
+	 */
+	if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		buf[3] = 3;
+		buf[5] = 0x80;
+		buf[6] = 0x83;
+		buf[7] = 0x86;
+	}
+
+	return 0;
+}
+
+/* unit serial number */
+static int
+target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	u16 len = 0;
+
+	buf[1] = 0x80;
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		u32 unit_serial_len;
+
+		unit_serial_len =
+			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		if (((len + 4) + unit_serial_len) > cmd->data_length) {
+			len += unit_serial_len;
+			buf[2] = ((len >> 8) & 0xff);
+			buf[3] = (len & 0xff);
+			return 0;
+		}
+		len += sprintf((unsigned char *)&buf[4], "%s",
+			&DEV_T10_WWN(dev)->unit_serial[0]);
+		len++; /* Extra Byte for NULL Terminator */
+		buf[3] = len;
+	}
+	return 0;
+}
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+static int
+target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_port *port = NULL;
+	struct se_portal_group *tpg = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char binary, binary_new;
+	unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+	u32 prod_len;
+	u32 unit_serial_len, off = 0;
+	int i;
+	u16 len = 0, id_len;
+
+	buf[1] = 0x83;
+	off = 4;
+
+	/*
+	 * NAA IEEE Registered Extended Assigned designator format, see
+	 * spc4r17 section 7.7.3.6.5
+	 *
+	 * We depend upon a target_core_mod/ConfigFS provided
+	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+	 * value in order to return the NAA id.
+	 */
+	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+		goto check_t10_vend_desc;
+
+	if (off + 20 > cmd->data_length)
+		goto check_t10_vend_desc;
+
+	/* CODE SET == Binary */
+	buf[off++] = 0x1;
+
+	/* Set ASSOCIATION == addressed logical unit: 00b */
+	buf[off] = 0x00;
+
+	/* Identifier/Designator type == NAA identifier */
+	buf[off++] = 0x3;
+	off++;
+
+	/* Identifier/Designator length */
+	buf[off++] = 0x10;
+
+	/*
+	 * Start NAA IEEE Registered Extended Identifier/Designator
+	 */
+	buf[off++] = (0x6 << 4);
+
+	/*
+	 * Use OpenFabrics IEEE Company ID: 00 14 05
+	 */
+	buf[off++] = 0x01;
+	buf[off++] = 0x40;
+	buf[off] = (0x5 << 4);
+
+	/*
+	 * Return ConfigFS Unit Serial Number information for
+	 * VENDOR_SPECIFIC_IDENTIFIER and
+	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
+	 */
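+	/*
+	 * The unit serial is stored as ASCII hex, so its digits are
+	 * repacked one nibble at a time below: the first digit shares a
+	 * byte with the low nibble of the company ID, and each loop pass
+	 * carries the previous input byte's low nibble into the next
+	 * output byte's high nibble.
+	 */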
+	binary = transport_asciihex_to_binaryhex(
+				&DEV_T10_WWN(dev)->unit_serial[0]);
+	buf[off++] |= (binary & 0xf0) >> 4;
+	for (i = 0; i < 24; i += 2) {
+		binary_new = transport_asciihex_to_binaryhex(
+			&DEV_T10_WWN(dev)->unit_serial[i+2]);
+		buf[off] = (binary & 0x0f) << 4;
+		buf[off++] |= (binary_new & 0xf0) >> 4;
+		binary = binary_new;
+	}
+	len = 20;
+	off = (len + 4);
+
+check_t10_vend_desc:
+	/*
+	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+	 */
+	id_len = 8; /* For Vendor field */
+	prod_len = 4; /* For VPD Header */
+	prod_len += 8; /* For Vendor field */
+	prod_len += strlen(prod);
+	prod_len++; /* For : */
+
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		unit_serial_len =
+			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		if ((len + (id_len + 4) +
+		    (prod_len + unit_serial_len)) >
+				cmd->data_length) {
+			len += (prod_len + unit_serial_len);
+			goto check_port;
+		}
+		id_len += sprintf((unsigned char *)&buf[off+12],
+				"%s:%s", prod,
+				&DEV_T10_WWN(dev)->unit_serial[0]);
+	}
+	buf[off] = 0x2; /* ASCII */
+	buf[off+1] = 0x1; /* T10 Vendor ID */
+	buf[off+2] = 0x0;
+	memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+	/* Extra Byte for NULL Terminator */
+	id_len++;
+	/* Identifier Length */
+	buf[off+3] = id_len;
+	/* Header size for Designation descriptor */
+	len += (id_len + 4);
+	off += (id_len + 4);
+	/*
+	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
+	 */
+check_port:
+	port = lun->lun_sep;
+	if (port) {
+		struct t10_alua_lu_gp *lu_gp;
+		u32 padding, scsi_name_len;
+		u16 lu_gp_id = 0;
+		u16 tg_pt_gp_id = 0;
+		u16 tpgt;
+
+		tpg = port->sep_tpg;
+		/*
+		 * Relative target port identifier, see spc4r17
+		 * section 7.7.3.7
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_tpgi;
+		}
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Relative target port identifier */
+		buf[off++] |= 0x4;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		/* Skip over Obsolete field in RTPI payload
+		 * in Table 472 */
+		off += 2;
+		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+		buf[off++] = (port->sep_rtpi & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Target port group identifier, see spc4r17
+		 * section 7.7.3.8
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_tpgi:
+		if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+				SPC3_ALUA_EMULATED)
+			goto check_scsi_name;
+
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_lu_gp;
+		}
+		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+		if (!tg_pt_gp_mem)
+			goto check_lu_gp;
+
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+		if (!(tg_pt_gp)) {
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+			goto check_lu_gp;
+		}
+		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Target port group identifier */
+		buf[off++] |= 0x5;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Logical Unit Group identifier, see spc4r17
+		 * section 7.7.3.8
+		 */
+check_lu_gp:
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_scsi_name;
+		}
+		lu_gp_mem = dev->dev_alua_lu_gp_mem;
+		if (!(lu_gp_mem))
+			goto check_scsi_name;
+
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		lu_gp = lu_gp_mem->lu_gp;
+		if (!(lu_gp)) {
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+			goto check_scsi_name;
+		}
+		lu_gp_id = lu_gp->lu_gp_id;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		/* DESIGNATOR TYPE == Logical Unit Group identifier */
+		buf[off++] |= 0x6;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((lu_gp_id >> 8) & 0xff);
+		buf[off++] = (lu_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * SCSI name string designator, see spc4r17
+		 * section 7.7.3.11
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_scsi_name:
+		scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
+		scsi_name_len += 10;
+		/* Check for 4-byte padding */
+		padding = ((-scsi_name_len) & 3);
+		if (padding != 0)
+			scsi_name_len += padding;
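+		/* e.g. a 21-byte name string needs 3 pad bytes so the
+		 * DESIGNATOR LENGTH stays a multiple of four */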
+		/* Header size + Designation descriptor */
+		scsi_name_len += 4;
+
+		if (((len + 4) + scsi_name_len) > cmd->data_length) {
+			len += scsi_name_len;
+			goto set_len;
+		}
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == SCSI name string */
+		buf[off++] |= 0x8;
+		off += 2; /* Skip over Reserved and length */
+		/*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and iSCSI
+		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+		 * UTF-8 encoding.
+		 */
+		tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+					TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+		scsi_name_len += 1; /* Include NULL terminator */
+		/*
+		 * The null-terminated, null-padded (see 4.4.2) SCSI
+		 * NAME STRING field contains a UTF-8 format string.
+		 * The number of bytes in the SCSI NAME STRING field
+		 * (i.e., the value in the DESIGNATOR LENGTH field)
+		 * shall be no larger than 256 and shall be a multiple
+		 * of four.
+		 */
+		if (padding)
+			scsi_name_len += padding;
+
+		buf[off-1] = scsi_name_len;
+		off += scsi_name_len;
+		/* Header size + Designation descriptor */
+		len += (scsi_name_len + 4);
+	}
+set_len:
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+	return 0;
+}
+
+/* Extended INQUIRY Data VPD Page */
+static int
+target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+	if (cmd->data_length < 60)
+		return 0;
+
+	buf[1] = 0x86;
+	buf[2] = 0x3c;
+	/* Set HEADSUP, ORDSUP, SIMPSUP */
+	buf[5] = 0x07;
+
+	/* If WriteCache emulation is enabled, set V_SUP */
+	if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+		buf[6] = 0x01;
+	return 0;
+}
+
+/* Block Limits VPD page */
+static int
+target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	int have_tp = 0;
+
+	/*
+	 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+	 * emulate_tpu=1 or emulate_tpws=1 we expect a
+	 * different page length for Thin Provisioning.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		have_tp = 1;
+
+	if (cmd->data_length < (0x10 + 4)) {
+		printk(KERN_INFO "Received data_length: %u"
+			" too small for EVPD 0xb0\n",
+			cmd->data_length);
+		return -1;
+	}
+
+	if (have_tp && cmd->data_length < (0x3c + 4)) {
+		printk(KERN_INFO "Received data_length: %u"
+			" too small for TPE=1 EVPD 0xb0\n",
+			cmd->data_length);
+		have_tp = 0;
+	}
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[1] = 0xb0;
+	buf[3] = have_tp ? 0x3c : 0x10;
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+	 */
+	put_unaligned_be16(1, &buf[6]);
+
+	/*
+	 * Set MAXIMUM TRANSFER LENGTH
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+
+	/*
+	 * Exit now if we don't support TP or the initiator sent a too
+	 * short buffer.
+	 */
+	if (!have_tp || cmd->data_length < (0x3c + 4))
+		return 0;
+
+	/*
+	 * Set MAXIMUM UNMAP LBA COUNT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+
+	/*
+	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+			   &buf[24]);
+
+	/*
+	 * Set OPTIMAL UNMAP GRANULARITY
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+
+	/*
+	 * UNMAP GRANULARITY ALIGNMENT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+			   &buf[32]);
+	if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+		buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+	return 0;
+}
+
+/* Thin Provisioning VPD */
+static int
+target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	/*
+	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+	 *
+	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+	 * zero, then the page length shall be set to 0004h.  If the DP bit
+	 * is set to one, then the page length shall be set to the value
+	 * defined in table 162.
+	 */
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[1] = 0xb2;
+
+	/*
+	 * Set Hardcoded length mentioned above for DP=0
+	 */
+	put_unaligned_be16(0x0004, &buf[2]);
+
+	/*
+	 * The THRESHOLD EXPONENT field indicates the threshold set size in
+	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
+	 * 2^(threshold exponent)).
+	 *
+	 * Note that this is currently set to 0x00 as mkp says it will be
+	 * changing again.  We can enable this once it has settled in T10
+	 * and is actually used by Linux/SCSI ML code.
+	 */
+	buf[4] = 0x00;
+
+	/*
+	 * A TPU bit set to one indicates that the device server supports
+	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+	 * that the device server does not support the UNMAP command.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+		buf[5] = 0x80;
+
+	/*
+	 * A TPWS bit set to one indicates that the device server supports
+	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+	 * A TPWS bit set to zero indicates that the device server does not
+	 * support the use of the WRITE SAME (16) command to unmap LBAs.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+		buf[5] |= 0x40;
+
+	return 0;
+}
+
+static int
+target_emulate_inquiry(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *cdb = cmd->t_task->t_task_cdb;
+
+	if (!(cdb[1] & 0x1))
+		return target_emulate_inquiry_std(cmd);
+
+	/*
+	 * Make sure we at least have 4 bytes of INQUIRY response
+	 * payload for 0x00 going back for EVPD=1.  Note that 0x80
+	 * and 0x83 will check for enough payload data length and
+	 * jump to the set_len: label when there is not enough inquiry EVPD
+	 * payload length left for the next outgoing EVPD metadata.
+	 */
+	if (cmd->data_length < 4) {
+		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+			" too small for EVPD=1\n", cmd->data_length);
+		return -1;
+	}
+	buf[0] = dev->transport->get_device_type(dev);
+
+	switch (cdb[2]) {
+	case 0x00:
+		return target_emulate_evpd_00(cmd, buf);
+	case 0x80:
+		return target_emulate_evpd_80(cmd, buf);
+	case 0x83:
+		return target_emulate_evpd_83(cmd, buf);
+	case 0x86:
+		return target_emulate_evpd_86(cmd, buf);
+	case 0xb0:
+		return target_emulate_evpd_b0(cmd, buf);
+	case 0xb2:
+		return target_emulate_evpd_b2(cmd, buf);
+	default:
+		printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+target_emulate_readcapacity(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	u32 blocks = dev->transport->get_blocks(dev);
+
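+	/*
+	 * Pack the returned block address and the logical block length in
+	 * bytes big-endian, per the sbc3r22 READ CAPACITY (10) parameter
+	 * data format.
+	 */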
+	buf[0] = (blocks >> 24) & 0xff;
+	buf[1] = (blocks >> 16) & 0xff;
+	buf[2] = (blocks >> 8) & 0xff;
+	buf[3] = blocks & 0xff;
+	buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+	buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+	buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+	buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+	/*
+	 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+
+	return 0;
+}
+
+static int
+target_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned long long blocks = dev->transport->get_blocks(dev);
+
+	buf[0] = (blocks >> 56) & 0xff;
+	buf[1] = (blocks >> 48) & 0xff;
+	buf[2] = (blocks >> 40) & 0xff;
+	buf[3] = (blocks >> 32) & 0xff;
+	buf[4] = (blocks >> 24) & 0xff;
+	buf[5] = (blocks >> 16) & 0xff;
+	buf[6] = (blocks >> 8) & 0xff;
+	buf[7] = blocks & 0xff;
+	buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+	buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+	buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+	buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+	/*
+	 * Set Thin Provisioning Enable bit following sbc3r22 in section
+	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		buf[14] = 0x80;
+
+	return 0;
+}
+
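+/*
+ * Read-Write Error Recovery mode page (page code 0x01).  PAGE LENGTH is
+ * 0x0a, so the full page is 12 bytes including the two-byte page header,
+ * matching the length returned to the caller.
+ */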
+static int
+target_modesense_rwrecovery(unsigned char *p)
+{
+	p[0] = 0x01;
+	p[1] = 0x0a;
+
+	return 12;
+}
+
+static int
+target_modesense_control(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x0a;
+	p[1] = 0x0a;
+	p[2] = 2;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+	 *
+	 * 00b: The logical unit shall clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when a com-
+	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+	 * status.
+	 *
+	 * 10b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when
+	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+	 * CONFLICT status.
+	 *
+	 * 11b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall establish a unit attention condition for the
+	 * initiator port associated with the I_T nexus on which the BUSY,
+	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+	 * Depending on the status, the additional sense code shall be set to
+	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+	 * command, a unit attention condition shall be established only once
+	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
+	 * of the number of commands completed with one of those status codes.
+	 */
+	p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Task Aborted Status (TAS) bit set to zero.
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+	p[8] = 0xff;
+	p[9] = 0xff;
+	p[11] = 30;
+
+	return 12;
+}
+
+static int
+target_modesense_caching(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x08;
+	p[1] = 0x12;
+	if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+		p[2] = 0x04; /* Write Cache Enable */
+	p[12] = 0x20; /* Disabled Read Ahead */
+
+	return 20;
+}
+
+static void
+target_modesense_write_protect(unsigned char *buf, int type)
+{
+	/*
+	 * I believe that the WP bit (bit 7) in the mode header is the same for
+	 * all device types.
+	 */
+	switch (type) {
+	case TYPE_DISK:
+	case TYPE_TAPE:
+	default:
+		buf[0] |= 0x80; /* WP bit */
+		break;
+	}
+}
+
+static void
+target_modesense_dpofua(unsigned char *buf, int type)
+{
+	switch (type) {
+	case TYPE_DISK:
+		buf[0] |= 0x10; /* DPOFUA bit */
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+target_emulate_modesense(struct se_cmd *cmd, int ten)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *rbuf = cmd->t_task->t_task_buf;
+	int type = dev->transport->get_device_type(dev);
+	int offset = (ten) ? 8 : 4;
+	int length = 0;
+	unsigned char buf[SE_MODE_PAGE_BUF];
+
+	memset(buf, 0, SE_MODE_PAGE_BUF);
+
+	switch (cdb[2] & 0x3f) {
+	case 0x01:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		break;
+	case 0x08:
+		length = target_modesense_caching(dev, &buf[offset]);
+		break;
+	case 0x0a:
+		length = target_modesense_control(dev, &buf[offset]);
+		break;
+	case 0x3f:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		length += target_modesense_caching(dev, &buf[offset+length]);
+		length += target_modesense_control(dev, &buf[offset+length]);
+		break;
+	default:
+		printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+				cdb[2] & 0x3f);
+		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+	}
+	offset += length;
+
+	if (ten) {
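+		/*
+		 * MODE DATA LENGTH excludes its own field: subtract two
+		 * bytes for MODE SENSE(10) here, one byte for MODE
+		 * SENSE(6) below.
+		 */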
+		offset -= 2;
+		buf[0] = (offset >> 8) & 0xff;
+		buf[1] = offset & 0xff;
+
+		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[3], type);
+
+		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[3], type);
+
+		if ((offset + 2) > cmd->data_length)
+			offset = cmd->data_length;
+
+	} else {
+		offset -= 1;
+		buf[0] = offset & 0xff;
+
+		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[2], type);
+
+		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[2], type);
+
+		if ((offset + 1) > cmd->data_length)
+			offset = cmd->data_length;
+	}
+	memcpy(rbuf, buf, offset);
+
+	return 0;
+}
+
+static int
+target_emulate_request_sense(struct se_cmd *cmd)
+{
+	unsigned char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	u8 ua_asc = 0, ua_ascq = 0;
+
+	if (cdb[1] & 0x01) {
+		printk(KERN_ERR "REQUEST_SENSE description emulation not"
+			" supported\n");
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
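+	/*
+	 * Both branches below build fixed format sense data (response code
+	 * 0x70); byte 7 holds the ADDITIONAL SENSE LENGTH, 0x0A when the
+	 * ASC/ASCQ bytes are included.
+	 */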
+	if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+		/*
+		 * CURRENT ERROR, UNIT ATTENTION
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+		/*
+		 * Make sure request data length is enough for additional
+		 * sense data.
+		 */
+		if (cmd->data_length <= 18) {
+			buf[7] = 0x00;
+			return 0;
+		}
+		/*
+		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
+		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
+		buf[7] = 0x0A;
+	} else {
+		/*
+		 * CURRENT ERROR, NO SENSE
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
+		/*
+		 * Make sure request data length is enough for additional
+		 * sense data.
+		 */
+		if (cmd->data_length <= 18) {
+			buf[7] = 0x00;
+			return 0;
+		}
+		/*
+		 * NO ADDITIONAL SENSE INFORMATION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = 0x00;
+		buf[7] = 0x0A;
+	}
+
+	return 0;
+}
+
+/*
+ * Used by TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_unmap(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	sector_t lba;
+	unsigned int size = cmd->data_length, range;
+	int ret, offset;
+	unsigned short dl, bd_dl;
+
+	/* First UNMAP block descriptor starts at 8 byte offset */
+	offset = 8;
+	size -= 8;
+	/* The UNMAP DATA LENGTH and BLOCK DESCRIPTOR DATA LENGTH fields
+	 * live in the parameter data, not in the CDB */
+	dl = get_unaligned_be16(&buf[0]);
+	bd_dl = get_unaligned_be16(&buf[2]);
+	ptr = &buf[offset];
+	printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %u"
+		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
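+	/*
+	 * Walk the UNMAP block descriptors: each is 16 bytes, an 8-byte
+	 * LBA followed by a 4-byte block count and 4 reserved bytes.
+	 */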
+	while (size) {
+		lba = get_unaligned_be64(&ptr[0]);
+		range = get_unaligned_be32(&ptr[8]);
+		printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+				 (unsigned long long)lba, range);
+
+		ret = dev->transport->do_discard(dev, lba, range);
+		if (ret < 0) {
+			printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+					ret);
+			return -1;
+		}
+
+		ptr += 16;
+		size -= 16;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+/*
+ * Used by TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_write_same(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	sector_t lba = cmd->t_task->t_task_lba;
+	unsigned int range;
+	int ret;
+
+	range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+
+	printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
+			 (unsigned long long)lba, range);
+
+	ret = dev->transport->do_discard(dev, lba, range);
+	if (ret < 0) {
+		printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
+		return -1;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+int
+transport_emulate_control_cdb(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned short service_action;
+	int ret = 0;
+
+	switch (cmd->t_task->t_task_cdb[0]) {
+	case INQUIRY:
+		ret = target_emulate_inquiry(cmd);
+		break;
+	case READ_CAPACITY:
+		ret = target_emulate_readcapacity(cmd);
+		break;
+	case MODE_SENSE:
+		ret = target_emulate_modesense(cmd, 0);
+		break;
+	case MODE_SENSE_10:
+		ret = target_emulate_modesense(cmd, 1);
+		break;
+	case SERVICE_ACTION_IN:
+		switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+		case SAI_READ_CAPACITY_16:
+			ret = target_emulate_readcapacity_16(cmd);
+			break;
+		default:
+			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+				cmd->t_task->t_task_cdb[1] & 0x1f);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		break;
+	case REQUEST_SENSE:
+		ret = target_emulate_request_sense(cmd);
+		break;
+	case UNMAP:
+		if (!dev->transport->do_discard) {
+			printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+					dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_unmap(task);
+		break;
+	case WRITE_SAME_16:
+		if (!dev->transport->do_discard) {
+			printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+					" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_write_same(task);
+		break;
+	case VARIABLE_LENGTH_CMD:
+		service_action =
+			get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+		switch (service_action) {
+		case WRITE_SAME_32:
+			if (!dev->transport->do_discard) {
+				printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+					" supported for: %s\n",
+					dev->transport->name);
+				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+			}
+			ret = target_emulate_write_same(task);
+			break;
+		default:
+			printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+					" 0x%02x\n", service_action);
+			break;
+		}
+		break;
+	case SYNCHRONIZE_CACHE:
+	case 0x91: /* SYNCHRONIZE_CACHE_16: */
+		if (!dev->transport->do_sync_cache) {
+			printk(KERN_ERR
+				"SYNCHRONIZE_CACHE emulation not supported"
+				" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		dev->transport->do_sync_cache(task);
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case START_STOP:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+		break;
+	default:
+		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
+			cmd->t_task->t_task_cdb[0], dev->transport->name);
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
+	if (ret < 0)
+		return ret;
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 0000000..2764510
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3225 @@
+/*******************************************************************************
+ * Filename:  target_core_configfs.c
+ *
+ * This file contains ConfigFS logic for the Generic Target Engine project.
+ *
+ * Copyright (c) 2008-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+#include <linux/proc_fs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_rd.h"
+
+static struct list_head g_tf_list;
+static struct mutex g_tf_lock;
+
+struct target_core_configfs_attribute {
+	struct configfs_attribute attr;
+	ssize_t (*show)(void *, char *);
+	ssize_t (*store)(void *, const char *, size_t);
+};
+
+static inline struct se_hba *
+item_to_hba(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+/*
+ * Attributes for /sys/kernel/config/target/
+ */
+static ssize_t target_core_attr_show(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      char *page)
+{
+	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
+		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+		utsname()->sysname, utsname()->machine);
+}
+
+static struct configfs_item_operations target_core_fabric_item_ops = {
+	.show_attribute = target_core_attr_show,
+};
+
+static struct configfs_attribute target_core_item_attr_version = {
+	.ca_owner	= THIS_MODULE,
+	.ca_name	= "version",
+	.ca_mode	= S_IRUGO,
+};
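+
+/*
+ * Reading this attribute, e.g. with:
+ *
+ *	cat /sys/kernel/config/target/version
+ *
+ * returns a line of the form "Target Engine Core ConfigFS Infrastructure
+ * <version> on <sysname>/<machine> on <utsrelease>".
+ */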
+
+static struct target_fabric_configfs *target_core_get_fabric(
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!(name))
+		return NULL;
+
+	mutex_lock(&g_tf_lock);
+	list_for_each_entry(tf, &g_tf_list, tf_list) {
+		if (!(strcmp(tf->tf_name, name))) {
+			atomic_inc(&tf->tf_access_cnt);
+			mutex_unlock(&g_tf_lock);
+			return tf;
+		}
+	}
+	mutex_unlock(&g_tf_lock);
+
+	return NULL;
+}
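+
+/*
+ * Note: a successful lookup takes a tf->tf_access_cnt reference that is
+ * dropped again in target_core_deregister_fabric().
+ */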
+
+/*
+ * Called from struct target_core_group_ops->make_group()
+ */
+static struct config_group *target_core_register_fabric(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+			" %s\n", group, name);
+	/*
+	 * Ensure that TCM subsystem plugins are loaded at this point for
+	 * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
+	 * LUN symlinks.
+	 */
+	if (transport_subsystem_check_init() < 0)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Below are some hardcoded request_module() calls to automatically
+	 * load fabric modules when the following is called:
+	 *
+	 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
+	 *
+	 * Note that this does not limit which TCM fabric module can be
+	 * registered, but simply provides auto loading logic for mkdir(2)
+	 * system calls naming known TCM fabric modules.
+	 */
+	if (!(strncmp(name, "iscsi", 5))) {
+		/*
+		 * Automatically load the LIO Target fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/iscsi
+		 */
+		ret = request_module("iscsi_target_mod");
+		if (ret < 0) {
+			printk(KERN_ERR "request_module() failed for"
+				" iscsi_target_mod.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	} else if (!(strncmp(name, "loopback", 8))) {
+		/*
+		 * Automatically load the tcm_loop fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/loopback
+		 */
+		ret = request_module("tcm_loop");
+		if (ret < 0) {
+			printk(KERN_ERR "request_module() failed for"
+				" tcm_loop.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	tf = target_core_get_fabric(name);
+	if (!(tf)) {
+		printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+			name);
+		return ERR_PTR(-EINVAL);
+	}
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+			" %s\n", tf->tf_name);
+	/*
+	 * On a successful target_core_get_fabric() lookup, the returned
+	 * struct target_fabric_configfs *tf will contain a usage reference.
+	 */
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+
+	tf->tf_group.default_groups = tf->tf_default_groups;
+	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
+	tf->tf_group.default_groups[1] = NULL;
+
+	config_group_init_type_name(&tf->tf_group, name,
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
+			&TF_CIT_TMPL(tf)->tfc_discovery_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+			" %s\n", tf->tf_group.cg_item.ci_name);
+	/*
+	 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
+	 */
+	tf->tf_ops.tf_subsys = tf->tf_subsys;
+	tf->tf_fabric = &tf->tf_group.cg_item;
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+			" for %s\n", name);
+
+	return &tf->tf_group;
+}
+
+/*
+ * Called from struct target_core_group_ops->drop_item()
+ */
+static void target_core_deregister_fabric(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct target_fabric_configfs *tf = container_of(
+		to_config_group(item), struct target_fabric_configfs, tf_group);
+	struct config_group *tf_group;
+	struct config_item *df_item;
+	int i;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+		" tf list\n", config_item_name(item));
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+			" %s\n", tf->tf_name);
+	atomic_dec(&tf->tf_access_cnt);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+			" tf->tf_fabric for %s\n", tf->tf_name);
+	tf->tf_fabric = NULL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+			" %s\n", config_item_name(item));
+
+	tf_group = &tf->tf_group;
+	for (i = 0; tf_group->default_groups[i]; i++) {
+		df_item = &tf_group->default_groups[i]->cg_item;
+		tf_group->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_fabric_group_ops = {
+	.make_group	= &target_core_register_fabric,
+	.drop_item	= &target_core_deregister_fabric,
+};
+
+/*
+ * All item attributes appearing in /sys/kernel/target/ appear here.
+ */
+static struct configfs_attribute *target_core_fabric_item_attrs[] = {
+	&target_core_item_attr_version,
+	NULL,
+};
+
+/*
+ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
+ */
+static struct config_item_type target_core_fabrics_item = {
+	.ct_item_ops	= &target_core_fabric_item_ops,
+	.ct_group_ops	= &target_core_fabric_group_ops,
+	.ct_attrs	= target_core_fabric_item_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct configfs_subsystem target_core_fabrics = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "target",
+			.ci_type = &target_core_fabrics_item,
+		},
+	},
+};
+
+static struct configfs_subsystem *target_core_subsystem[] = {
+	&target_core_fabrics,
+	NULL,
+};
+
+/*##############################################################################
+// Start functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/*
+ * First function called by fabric modules to:
+ *
+ * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
+ * 2) Add struct target_fabric_configfs to g_tf_list
+ * 3) Return struct target_fabric_configfs to fabric module to be passed
+ *    into target_fabric_configfs_register().
+ */
+struct target_fabric_configfs *target_fabric_configfs_init(
+	struct module *fabric_mod,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!(fabric_mod)) {
+		printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
+		return NULL;
+	}
+	if (!(name)) {
+		printk(KERN_ERR "Unable to locate passed fabric name\n");
+		return NULL;
+	}
+	if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+		printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+			"_NAME_SIZE\n", name);
+		return NULL;
+	}
+
+	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+	if (!(tf))
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&tf->tf_list);
+	atomic_set(&tf->tf_access_cnt, 0);
+	/*
+	 * Setup the default generic struct config_item_type's (cits) in
+	 * struct target_fabric_configfs->tf_cit_tmpl
+	 */
+	tf->tf_module = fabric_mod;
+	target_fabric_setup_cits(tf);
+
+	tf->tf_subsys = target_core_subsystem[0];
+	snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
+
+	mutex_lock(&g_tf_lock);
+	list_add_tail(&tf->tf_list, &g_tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+			">>>>>>>>>>>>>>\n");
+	printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+			" %s\n", tf, tf->tf_name);
+	return tf;
+}
+EXPORT_SYMBOL(target_fabric_configfs_init);
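+
+/*
+ * Minimal sketch of the expected call sequence from a fabric module
+ * (hypothetical fabric name "demo"; error handling elided):
+ *
+ *	tf = target_fabric_configfs_init(THIS_MODULE, "demo");
+ *	tf->tf_ops = demo_fabric_ops;
+ *	ret = target_fabric_configfs_register(tf);
+ *	if (ret < 0)
+ *		target_fabric_configfs_free(tf);
+ */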
+
+/*
+ * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
+ */
+void target_fabric_configfs_free(
+	struct target_fabric_configfs *tf)
+{
+	mutex_lock(&g_tf_lock);
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	kfree(tf);
+}
+EXPORT_SYMBOL(target_fabric_configfs_free);
+
+/*
+ * Perform a sanity check of the passed tf->tf_ops before completing
+ * TCM fabric module registration.
+ */
+static int target_fabric_tf_ops_check(
+	struct target_fabric_configfs *tf)
+{
+	struct target_core_fabric_ops *tfo = &tf->tf_ops;
+
+	if (!(tfo->get_fabric_name)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_fabric_proto_ident)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_wwn)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_tag)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_default_depth)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_pr_transport_id)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_pr_transport_id_len)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode_cache)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode_write_protect)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_prod_mode_write_protect)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_alloc_fabric_acl)) {
+		printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_release_fabric_acl)) {
+		printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_inst_index)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->release_cmd_to_pool)) {
+		printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->release_cmd_direct)) {
+		printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->shutdown_session)) {
+		printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->close_session)) {
+		printk(KERN_ERR "Missing tfo->close_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->stop_session)) {
+		printk(KERN_ERR "Missing tfo->stop_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fall_back_to_erl0)) {
+		printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->sess_logged_in)) {
+		printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->sess_get_index)) {
+		printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->write_pending)) {
+		printk(KERN_ERR "Missing tfo->write_pending()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->write_pending_status)) {
+		printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->set_default_node_attributes)) {
+		printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_task_tag)) {
+		printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_cmd_state)) {
+		printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->new_cmd_failure)) {
+		printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_data_in)) {
+		printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_status)) {
+		printk(KERN_ERR "Missing tfo->queue_status()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_tm_rsp)) {
+		printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->set_fabric_sense_len)) {
+		printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_fabric_sense_len)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->is_state_remove)) {
+		printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->pack_lun)) {
+		printk(KERN_ERR "Missing tfo->pack_lun()\n");
+		return -EINVAL;
+	}
+	/*
+	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
+	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+	 * target_core_fabric_configfs.c WWN+TPG group context code.
+	 */
+	if (!(tfo->fabric_make_wwn)) {
+		printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_drop_wwn)) {
+		printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_make_tpg)) {
+		printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_drop_tpg)) {
+		printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Called 2nd from fabric module with returned parameter of
+ * struct target_fabric_configfs * from target_fabric_configfs_init().
+ *
+ * Upon a successful registration, the new fabric's struct config_item is
+ * returned.  Also, a pointer to this struct is set in the passed
+ * struct target_fabric_configfs.
+ */
+int target_fabric_configfs_register(
+	struct target_fabric_configfs *tf)
+{
+	struct config_group *su_group;
+	int ret;
+
+	if (!(tf)) {
+		printk(KERN_ERR "Unable to locate target_fabric_configfs"
+			" pointer\n");
+		return -EINVAL;
+	}
+	if (!(tf->tf_subsys)) {
+		printk(KERN_ERR "Unable to locate target struct config_subsystem"
+			" pointer\n");
+		return -EINVAL;
+	}
+	su_group = &tf->tf_subsys->su_group;
+	if (!(su_group)) {
+		printk(KERN_ERR "Unable to locate target struct config_group"
+			" pointer\n");
+		return -EINVAL;
+	}
+	ret = target_fabric_tf_ops_check(tf);
+	if (ret < 0)
+		return ret;
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+		">>>>>>>>>>\n");
+	return 0;
+}
+EXPORT_SYMBOL(target_fabric_configfs_register);
+
+void target_fabric_configfs_deregister(
+	struct target_fabric_configfs *tf)
+{
+	struct config_group *su_group;
+	struct configfs_subsystem *su;
+
+	if (!(tf)) {
+		printk(KERN_ERR "Unable to locate passed target_fabric_"
+			"configfs\n");
+		return;
+	}
+	su = tf->tf_subsys;
+	if (!(su)) {
+		printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+			" pointer\n");
+		return;
+	}
+	su_group = &tf->tf_subsys->su_group;
+	if (!(su_group)) {
+		printk(KERN_ERR "Unable to locate target struct config_group"
+			" pointer\n");
+		return;
+	}
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+			">>>>>>>>>>>>\n");
+	mutex_lock(&g_tf_lock);
+	if (atomic_read(&tf->tf_access_cnt)) {
+		mutex_unlock(&g_tf_lock);
+		printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+			tf->tf_name);
+		BUG();
+	}
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+			" %s\n", tf->tf_name);
+	tf->tf_module = NULL;
+	tf->tf_subsys = NULL;
+	kfree(tf);
+
+	printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+			">>>>>\n");
+	return;
+}
+EXPORT_SYMBOL(target_fabric_configfs_deregister);
+
+/*##############################################################################
+// Stop functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/* Start functions for struct config_item_type target_core_dev_attrib_cit */
+
+#define DEF_DEV_ATTRIB_SHOW(_name)					\
+static ssize_t target_core_dev_show_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	char *page)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	ssize_t rb;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev)) {							\
+		spin_unlock(&se_dev->se_dev_lock); 			\
+		return -ENODEV;						\
+	}								\
+	rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return rb;							\
+}
+
+#define DEF_DEV_ATTRIB_STORE(_name)					\
+static ssize_t target_core_dev_store_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	unsigned long val;						\
+	int ret;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev)) {							\
+		spin_unlock(&se_dev->se_dev_lock);			\
+		return -ENODEV;						\
+	}								\
+	ret = strict_strtoul(page, 0, &val);				\
+	if (ret < 0) {							\
+		spin_unlock(&se_dev->se_dev_lock);                      \
+		printk(KERN_ERR "strict_strtoul() failed with"		\
+			" ret: %d\n", ret);				\
+		return -EINVAL;						\
+	}								\
+	ret = se_dev_set_##_name(dev, (u32)val);			\
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return (!ret) ? count : -EINVAL;				\
+}
+
+#define DEF_DEV_ATTRIB(_name)						\
+DEF_DEV_ATTRIB_SHOW(_name);						\
+DEF_DEV_ATTRIB_STORE(_name);
+
+#define DEF_DEV_ATTRIB_RO(_name)					\
+DEF_DEV_ATTRIB_SHOW(_name);
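+
+/*
+ * Example: DEF_DEV_ATTRIB(block_size) emits
+ * target_core_dev_show_attr_block_size() and
+ * target_core_dev_store_attr_block_size(), the latter parsing the user
+ * input and applying it via se_dev_set_block_size() under se_dev_lock.
+ */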
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
+#define SE_DEV_ATTR(_name, _mode)					\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_show_attr_##_name,			\
+		target_core_dev_store_attr_##_name);
+
+#define SE_DEV_ATTR_RO(_name);						\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_show_attr_##_name);
+
+DEF_DEV_ATTRIB(emulate_dpo);
+SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_write);
+SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_read);
+SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_write_cache);
+SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
+SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tas);
+SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpu);
+SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpws);
+SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(enforce_pr_isids);
+SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_block_size);
+SE_DEV_ATTR_RO(hw_block_size);
+
+DEF_DEV_ATTRIB(block_size);
+SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_max_sectors);
+SE_DEV_ATTR_RO(hw_max_sectors);
+
+DEF_DEV_ATTRIB(max_sectors);
+SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(optimal_sectors);
+SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_queue_depth);
+SE_DEV_ATTR_RO(hw_queue_depth);
+
+DEF_DEV_ATTRIB(queue_depth);
+SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(task_timeout);
+SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_lba_count);
+SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_block_desc_count);
+SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity);
+SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity_alignment);
+SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+
+static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+	&target_core_dev_attrib_emulate_dpo.attr,
+	&target_core_dev_attrib_emulate_fua_write.attr,
+	&target_core_dev_attrib_emulate_fua_read.attr,
+	&target_core_dev_attrib_emulate_write_cache.attr,
+	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+	&target_core_dev_attrib_emulate_tas.attr,
+	&target_core_dev_attrib_emulate_tpu.attr,
+	&target_core_dev_attrib_emulate_tpws.attr,
+	&target_core_dev_attrib_enforce_pr_isids.attr,
+	&target_core_dev_attrib_hw_block_size.attr,
+	&target_core_dev_attrib_block_size.attr,
+	&target_core_dev_attrib_hw_max_sectors.attr,
+	&target_core_dev_attrib_max_sectors.attr,
+	&target_core_dev_attrib_optimal_sectors.attr,
+	&target_core_dev_attrib_hw_queue_depth.attr,
+	&target_core_dev_attrib_queue_depth.attr,
+	&target_core_dev_attrib_task_timeout.attr,
+	&target_core_dev_attrib_max_unmap_lba_count.attr,
+	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
+	&target_core_dev_attrib_unmap_granularity.attr,
+	&target_core_dev_attrib_unmap_granularity_alignment.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_attrib_ops = {
+	.show_attribute		= target_core_dev_attrib_attr_show,
+	.store_attribute	= target_core_dev_attrib_attr_store,
+};
+
+static struct config_item_type target_core_dev_attrib_cit = {
+	.ct_item_ops		= &target_core_dev_attrib_ops,
+	.ct_attrs		= target_core_dev_attrib_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_attrib_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_wwn_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
+#define SE_DEV_WWN_ATTR(_name, _mode)					\
+static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_wwn_show_attr_##_name,			\
+		target_core_dev_wwn_store_attr_##_name);
+
+#define SE_DEV_WWN_ATTR_RO(_name)					\
+static struct target_core_dev_wwn_attribute				\
+			target_core_dev_wwn_##_name =			\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_wwn_show_attr_##_name);
+
+/*
+ * VPD page 0x80 Unit serial
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
+		&t10_wwn->unit_serial[0]);
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+
+	/*
+	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
+	 * from the struct scsi_device level firmware, do not allow
+	 * VPD Unit Serial to be emulated.
+	 *
+	 * Note this struct scsi_device could also be emulating VPD
+	 * information from its drivers/scsi LLD.  But for now we assume
+	 * it is doing 'the right thing' wrt a world wide unique
+	 * VPD Unit Serial Number that OS dependent multipath can depend on.
+	 */
+	if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+		printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+			" Unit Serial, ignoring request\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+		printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
+		return -EOVERFLOW;
+	}
+	/*
+	 * Check to see if any active $FABRIC_MOD exports exist.  If they
+	 * do exist, fail here as changing this information on the fly
+	 * (underneath the initiator side OS dependent multipath code)
+	 * could cause negative effects.
+	 */
+	dev = su_dev->se_dev_ptr;
+	if ((dev)) {
+		if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+			printk(KERN_ERR "Unable to set VPD Unit Serial while"
+				" active %d $FABRIC_MOD exports exist\n",
+				atomic_read(&dev->dev_export_obj.obj_access_count));
+			return -EINVAL;
+		}
+	}
+	/*
+	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
+	 *
+	 * Also, strip any newline added from the userspace
+	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
+	 */
+	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
+	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
+	snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+			"%s", strstrip(buf));
+	su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+			" %s\n", su_dev->t10_wwn.unit_serial);
+
+	return count;
+}
+
+SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Protocol Identifier
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	struct t10_vpd *vpd;
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	ssize_t len = 0;
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	spin_lock(&t10_wwn->t10_vpd_lock);
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
+		if (!(vpd->protocol_identifier_set))
+			continue;
+
+		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
+
+		if ((len + strlen(buf) > PAGE_SIZE))
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+	}
+	spin_unlock(&t10_wwn->t10_vpd_lock);
+
+	return len;
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
+
+/*
+ * Generic wrapper for dumping VPD identifiers by association.
+ */
+#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
+static ssize_t target_core_dev_wwn_show_attr_##_name(			\
+	struct t10_wwn *t10_wwn,					\
+	char *page)							\
+{									\
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;		\
+	struct se_device *dev;						\
+	struct t10_vpd *vpd;							\
+	unsigned char buf[VPD_TMP_BUF_SIZE];				\
+	ssize_t len = 0;						\
+									\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev))							\
+		return -ENODEV;						\
+									\
+	spin_lock(&t10_wwn->t10_vpd_lock);				\
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
+		if (vpd->association != _assoc)				\
+			continue;					\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+	}								\
+	spin_unlock(&t10_wwn->t10_vpd_lock);				\
+									\
+	return len;							\
+}
+
+/*
+ * VPD page 0x83 Association: Logical Unit
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: Target Port
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: SCSI Target Device
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
+
+static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+	&target_core_dev_wwn_vpd_unit_serial.attr,
+	&target_core_dev_wwn_vpd_protocol_identifier.attr,
+	&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
+	&target_core_dev_wwn_vpd_assoc_target_port.attr,
+	&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_wwn_ops = {
+	.show_attribute		= target_core_dev_wwn_attr_show,
+	.store_attribute	= target_core_dev_wwn_attr_store,
+};
+
+static struct config_item_type target_core_dev_wwn_cit = {
+	.ct_item_ops		= &target_core_dev_wwn_ops,
+	.ct_attrs		= target_core_dev_wwn_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_wwn_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_pr_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+#define SE_DEV_PR_ATTR(_name, _mode)					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_dev_pr_show_attr_##_name,				\
+	target_core_dev_pr_store_attr_##_name);
+
+#define SE_DEV_PR_ATTR_RO(_name)					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name =	\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_pr_show_attr_##_name);
+
+/*
+ * res_holder
+ */
+static ssize_t target_core_dev_pr_show_spc3_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+	struct t10_pr_registration *pr_reg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+		TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+		se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_spc2_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+
+	spin_lock(&dev->dev_reservation_lock);
+	se_nacl = dev->dev_reserved_node_acl;
+	if (!(se_nacl)) {
+		*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
+		TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+		se_nacl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_attr_res_holder(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	switch (T10_RES(su_dev)->res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC2_RESERVATIONS:
+		target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC_PASSTHROUGH:
+		len += sprintf(page+len, "Passthrough\n");
+		break;
+	default:
+		len += sprintf(page+len, "Unknown\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_holder);
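+
+/*
+ * For illustration only (fabric name and initiator WWN are hypothetical),
+ * reading res_holder yields a line such as:
+ *
+ *   SPC-3 Reservation: iSCSI Initiator: iqn.1993-08.org.debian:01:abcdef
+ *
+ * or "No SPC-3 Reservation holder" / "Passthrough", depending on res_type
+ * and whether a reservation is currently held.
+ */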
+
+/*
+ * res_pr_all_tgt_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	/*
+	 * See the All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
+	 * Basic PERSISTENT RESERVE OUT parameter list, page 290
+	 */
+	if (pr_reg->pr_reg_all_tg_pt)
+		len = sprintf(page, "SPC-3 Reservation: All Target"
+			" Ports registration\n");
+	else
+		len = sprintf(page, "SPC-3 Reservation: Single"
+			" Target Port registration\n");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
+
+/*
+ * res_pr_generation
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_generation);
+
+/*
+ * res_pr_holder_tg_port
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_node_acl *se_nacl;
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg;
+	struct target_core_fabric_ops *tfo;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	se_tpg = se_nacl->se_tpg;
+	lun = pr_reg->pr_reg_tg_pt_lun;
+	tfo = TPG_TFO(se_tpg);
+
+	len += sprintf(page+len, "SPC-3 Reservation: %s"
+		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+		tfo->tpg_get_wwn(se_tpg));
+	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
+		" Identifier Tag: %hu %s Portal Group Tag: %hu"
+		" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
+		tfo->get_fabric_name(), lun->unpacked_lun);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
+
+/*
+ * res_pr_registered_i_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct target_core_fabric_ops *tfo;
+	struct t10_pr_registration *pr_reg;
+	unsigned char buf[384];
+	char i_buf[PR_REG_ISID_ID_LEN];
+	ssize_t len = 0;
+	int reg_count = 0, prf_isid;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
+
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+
+		memset(buf, 0, 384);
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+		prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+					PR_REG_ISID_ID_LEN);
+		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
+			tfo->get_fabric_name(),
+			pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", pr_reg->pr_res_key,
+			pr_reg->pr_res_generation);
+
+		if ((len + strlen(buf) > PAGE_SIZE))
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+		reg_count++;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	if (!(reg_count))
+		len += sprintf(page+len, "None\n");
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
+
+/*
+ * res_pr_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_type);
+
+/*
+ * res_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	switch (T10_RES(su_dev)->res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+		break;
+	case SPC2_RESERVATIONS:
+		len = sprintf(page, "SPC2_RESERVATIONS\n");
+		break;
+	case SPC_PASSTHROUGH:
+		len = sprintf(page, "SPC_PASSTHROUGH\n");
+		break;
+	default:
+		len = sprintf(page, "UNKNOWN\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_type);
+
+/*
+ * res_aptpl_active
+ */
+
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "APTPL Bit Status: %s\n",
+		(T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+}
+
+SE_DEV_PR_ATTR_RO(res_aptpl_active);
+
+/*
+ * res_aptpl_metadata
+ */
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "Ready to process PR APTPL metadata..\n");
+}
+
+enum {
+	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
+	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
+	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
+	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_initiator_fabric, "initiator_fabric=%s"},
+	{Opt_initiator_node, "initiator_node=%s"},
+	{Opt_initiator_sid, "initiator_sid=%s"},
+	{Opt_sa_res_key, "sa_res_key=%s"},
+	{Opt_res_holder, "res_holder=%d"},
+	{Opt_res_type, "res_type=%d"},
+	{Opt_res_scope, "res_scope=%d"},
+	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
+	{Opt_mapped_lun, "mapped_lun=%d"},
+	{Opt_target_fabric, "target_fabric=%s"},
+	{Opt_target_node, "target_node=%s"},
+	{Opt_tpgt, "tpgt=%d"},
+	{Opt_port_rtpi, "port_rtpi=%d"},
+	{Opt_target_lun, "target_lun=%d"},
+	{Opt_err, NULL}
+};
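+
+/*
+ * Hypothetical example of an APTPL metadata line as parsed by
+ * target_core_dev_pr_store_attr_res_aptpl_metadata() below; every value
+ * is made up, and initiator_node, target_node and sa_res_key are the
+ * mandatory keys:
+ *
+ *   initiator_fabric=iSCSI,initiator_node=iqn.1993-08.org.debian:01:abcdef,
+ *   sa_res_key=0x1234abcd,res_holder=1,res_type=3,mapped_lun=0,
+ *   target_fabric=iSCSI,target_node=iqn.2003-01.org.linux-iscsi.tgt,
+ *   tpgt=1,target_lun=0
+ */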
+
+static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
+	unsigned char *isid = NULL;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	unsigned long long tmp_ll;
+	u64 sa_res_key = 0;
+	u32 mapped_lun = 0, target_lun = 0;
+	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
+	u16 port_rtpi = 0, tpgt = 0;
+	u8 type = 0, scope;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_INFO "Unable to process APTPL metadata while"
+			" active fabric exports exist\n");
+		return -EINVAL;
+	}
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_initiator_fabric:
+			i_fabric = match_strdup(&args[0]);
+			break;
+		case Opt_initiator_node:
+			i_port = match_strdup(&args[0]);
+			if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+				printk(KERN_ERR "APTPL metadata initiator_node="
+					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
+					PR_APTPL_MAX_IPORT_LEN);
+				ret = -EINVAL;
+				goto out;
+			}
+			break;
+		case Opt_initiator_sid:
+			isid = match_strdup(&args[0]);
+			if (strlen(isid) > PR_REG_ISID_LEN) {
+				printk(KERN_ERR "APTPL metadata initiator_isid"
+					"= exceeds PR_REG_ISID_LEN: %d\n",
+					PR_REG_ISID_LEN);
+				ret = -EINVAL;
+				goto out;
+			}
+			break;
+		case Opt_sa_res_key:
+			arg_p = match_strdup(&args[0]);
+			ret = strict_strtoull(arg_p, 0, &tmp_ll);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoull() failed for"
+					" sa_res_key=\n");
+				goto out;
+			}
+			sa_res_key = (u64)tmp_ll;
+			break;
+		/*
+		 * PR APTPL Metadata for Reservation
+		 */
+		case Opt_res_holder:
+			match_int(args, &arg);
+			res_holder = arg;
+			break;
+		case Opt_res_type:
+			match_int(args, &arg);
+			type = (u8)arg;
+			break;
+		case Opt_res_scope:
+			match_int(args, &arg);
+			scope = (u8)arg;
+			break;
+		case Opt_res_all_tg_pt:
+			match_int(args, &arg);
+			all_tg_pt = (int)arg;
+			break;
+		case Opt_mapped_lun:
+			match_int(args, &arg);
+			mapped_lun = (u32)arg;
+			break;
+		/*
+		 * PR APTPL Metadata for Target Port
+		 */
+		case Opt_target_fabric:
+			t_fabric = match_strdup(&args[0]);
+			break;
+		case Opt_target_node:
+			t_port = match_strdup(&args[0]);
+			if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+				printk(KERN_ERR "APTPL metadata target_node="
+					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
+					PR_APTPL_MAX_TPORT_LEN);
+				ret = -EINVAL;
+				goto out;
+			}
+			break;
+		case Opt_tpgt:
+			match_int(args, &arg);
+			tpgt = (u16)arg;
+			break;
+		case Opt_port_rtpi:
+			match_int(args, &arg);
+			port_rtpi = (u16)arg;
+			break;
+		case Opt_target_lun:
+			match_int(args, &arg);
+			target_lun = (u32)arg;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!(i_port) || !(t_port) || !(sa_res_key)) {
+		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (res_holder && !(type)) {
+		printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+				" holder\n", type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
+			res_holder, all_tg_pt, type);
+out:
+	kfree(orig);
+	return (ret == 0) ? count : ret;
+}
+
+SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+
+static struct configfs_attribute *target_core_dev_pr_attrs[] = {
+	&target_core_dev_pr_res_holder.attr,
+	&target_core_dev_pr_res_pr_all_tgt_pts.attr,
+	&target_core_dev_pr_res_pr_generation.attr,
+	&target_core_dev_pr_res_pr_holder_tg_port.attr,
+	&target_core_dev_pr_res_pr_registered_i_pts.attr,
+	&target_core_dev_pr_res_pr_type.attr,
+	&target_core_dev_pr_res_type.attr,
+	&target_core_dev_pr_res_aptpl_active.attr,
+	&target_core_dev_pr_res_aptpl_metadata.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_pr_ops = {
+	.show_attribute		= target_core_dev_pr_attr_show,
+	.store_attribute	= target_core_dev_pr_attr_store,
+};
+
+static struct config_item_type target_core_dev_pr_cit = {
+	.ct_item_ops		= &target_core_dev_pr_ops,
+	.ct_attrs		= target_core_dev_pr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_pr_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_cit */
+
+static ssize_t target_core_show_dev_info(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	int bl = 0;
+	ssize_t read_bytes = 0;
+
+	if (!(se_dev->se_dev_ptr))
+		return -ENODEV;
+
+	transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+	read_bytes += bl;
+	read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_info = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "info",
+		    .ca_mode = S_IRUGO },
+	.show	= target_core_show_dev_info,
+	.store	= NULL,
+};
+
+static ssize_t target_core_store_dev_control(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate struct se_subsystem_dev->se"
+				"_dev_su_ptr\n");
+		return -EINVAL;
+	}
+
+	return t->set_configfs_dev_params(hba, se_dev, page, count);
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_control = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "control",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_control,
+};
+
+static ssize_t target_core_show_dev_alias(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+}
+
+static ssize_t target_core_store_dev_alias(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_DEV_ALIAS_LEN-1)) {
+		printk(KERN_ERR "alias count: %d exceeds"
+			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
+			SE_DEV_ALIAS_LEN-1);
+		return -EINVAL;
+	}
+
+	se_dev->su_dev_flags |= SDF_USING_ALIAS;
+	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
+			"%s", page);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_alias);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alias = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alias",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_alias,
+	.store	= target_core_store_dev_alias,
+};
+
+static ssize_t target_core_show_dev_udev_path(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+}
+
+static ssize_t target_core_store_dev_udev_path(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_UDEV_PATH_LEN-1)) {
+		printk(KERN_ERR "udev_path count: %d exceeds"
+			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
+			SE_UDEV_PATH_LEN-1);
+		return -EINVAL;
+	}
+
+	se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+			"%s", page);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_udev_path);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "udev_path",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_udev_path,
+	.store	= target_core_store_dev_udev_path,
+};
+
+static ssize_t target_core_store_dev_enable(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_device *dev;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	char *ptr;
+
+	ptr = strstr(page, "1");
+	if (!(ptr)) {
+		printk(KERN_ERR "For dev_enable ops, the only valid value"
+				" is \"1\"\n");
+		return -EINVAL;
+	}
+	if ((se_dev->se_dev_ptr)) {
+		printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+				" object\n");
+		return -EEXIST;
+	}
+
+	if (t->check_configfs_dev_params(hba, se_dev) < 0)
+		return -EINVAL;
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (!(dev) || IS_ERR(dev))
+		return -EINVAL;
+
+	se_dev->se_dev_ptr = dev;
+	printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+		" %p\n", se_dev->se_dev_ptr);
+
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_enable = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "enable",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_enable,
+};
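+
+/*
+ * Rough usage sketch with hypothetical names and parameters: a storage
+ * object is created by mkdir(2) under an HBA group, configured through
+ * "control" and brought online through "enable":
+ *
+ *   mkdir /sys/kernel/config/target/core/$HBA/my_dev
+ *   echo "$PARAMS" > /sys/kernel/config/target/core/$HBA/my_dev/control
+ *   echo 1 > /sys/kernel/config/target/core/$HBA/my_dev/enable
+ *
+ * The final write lands in target_core_store_dev_enable() above, which
+ * validates the backstore parameters and calls ->create_virtdevice().
+ */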
+
+static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct config_item *lu_ci;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem)) {
+		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		lu_ci = &lu_gp->lu_gp_group.cg_item;
+		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
+			config_item_name(lu_ci), lu_gp->lu_gp_id);
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	return len;
+}
+
+static ssize_t target_core_store_alua_lu_gp(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = su_dev->se_dev_hba;
+	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+	int move = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		return -EINVAL;
+	}
+	if (count >= LU_GROUP_NAME_BUF) {
+		printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA logical unit alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_lu_gp_by_name() will increment reference to
+		 * struct t10_alua_lu_gp.  This reference is released with
+		 * core_alua_put_lu_gp_from_name() below.
+		 */
+		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+		if (!(lu_gp_new))
+			return -ENODEV;
+	}
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem)) {
+		if (lu_gp_new)
+			core_alua_put_lu_gp_from_name(lu_gp_new);
+		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		/*
+		 * Clearing an existing lu_gp association, and replacing
+		 * with NULL
+		 */
+		if (!(lu_gp_new)) {
+			printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+				" %hu\n",
+				config_item_name(&hba->hba_group.cg_item),
+				config_item_name(&su_dev->se_dev_group.cg_item),
+				config_item_name(&lu_gp->lu_gp_group.cg_item),
+				lu_gp->lu_gp_id);
+
+			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of lu_gp_mem with lu_gp
+		 */
+		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+		move = 1;
+	}
+	/*
+	 * Associate lu_gp_mem with lu_gp_new.
+	 */
+	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+		" core/alua/lu_gps/%s, ID: %hu\n",
+		(move) ? "Moving" : "Adding",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&su_dev->se_dev_group.cg_item),
+		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+		lu_gp_new->lu_gp_id);
+
+	core_alua_put_lu_gp_from_name(lu_gp_new);
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alua_lu_gp",
+		    .ca_mode = S_IRUGO | S_IWUSR },
+	.show	= target_core_show_alua_lu_gp,
+	.store	= target_core_store_alua_lu_gp,
+};
+
+static struct configfs_attribute *lio_core_dev_attrs[] = {
+	&target_core_attr_dev_info.attr,
+	&target_core_attr_dev_control.attr,
+	&target_core_attr_dev_alias.attr,
+	&target_core_attr_dev_udev_path.attr,
+	&target_core_attr_dev_enable.attr,
+	&target_core_attr_dev_alua_lu_gp.attr,
+	NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct config_group *dev_cg;
+
+	if (!(se_dev))
+		return;
+
+	dev_cg = &se_dev->se_dev_group;
+	kfree(dev_cg->default_groups);
+}
+
+static ssize_t target_core_dev_show(struct config_item *item,
+				     struct configfs_attribute *attr,
+				     char *page)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!(tc_attr->show))
+		return -EINVAL;
+
+	return tc_attr->show((void *)se_dev, page);
+}
+
+static ssize_t target_core_dev_store(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      const char *page, size_t count)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!(tc_attr->store))
+		return -EINVAL;
+
+	return tc_attr->store((void *)se_dev, page, count);
+}
+
+static struct configfs_item_operations target_core_dev_item_ops = {
+	.release		= target_core_dev_release,
+	.show_attribute		= target_core_dev_show,
+	.store_attribute	= target_core_dev_store,
+};
+
+static struct config_item_type target_core_dev_cit = {
+	.ct_item_ops		= &target_core_dev_item_ops,
+	.ct_attrs		= lio_core_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
+#define SE_DEV_ALUA_LU_ATTR(_name, _mode)				\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_lu_gp_show_attr_##_name,			\
+	target_core_alua_lu_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_LU_ATTR_RO(_name)					\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_lu_gp_show_attr_##_name);
+
+/*
+ * lu_gp_id
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	if (!(lu_gp->lu_gp_valid_id))
+		return 0;
+
+	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
+}
+
+static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	unsigned long lu_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &lu_gp_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+			" lu_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (lu_gp_id > 0x0000ffff) {
+		printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", lu_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s to ID: %hu\n",
+		config_item_name(&alua_lu_gp_cg->cg_item),
+		lu_gp->lu_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_members(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_hba *hba;
+	struct se_subsystem_dev *su_dev;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		hba = su_dev->se_dev_hba;
+
+		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_LU_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
+
+static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
+	&target_core_alua_lu_gp_lu_gp_id.attr,
+	&target_core_alua_lu_gp_members.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.show_attribute		= target_core_alua_lu_gp_attr_show,
+	.store_attribute	= target_core_alua_lu_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_lu_gp_cit = {
+	.ct_item_ops		= &target_core_alua_lu_gp_ops,
+	.ct_attrs		= target_core_alua_lu_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+static struct config_group *target_core_alua_create_lu_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_group *alua_lu_gp_cg = NULL;
+	struct config_item *alua_lu_gp_ci = NULL;
+
+	lu_gp = core_alua_allocate_lu_gp(name, 0);
+	if (IS_ERR(lu_gp))
+		return NULL;
+
+	alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_lu_gp_cg, name,
+			&target_core_alua_lu_gp_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s\n",
+		config_item_name(alua_lu_gp_ci));
+
+	return alua_lu_gp_cg;
+}
+
+static void target_core_alua_drop_lu_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s, ID: %hu\n",
+		config_item_name(item), lu_gp->lu_gp_id);
+
+	config_item_put(item);
+	core_alua_free_lu_gp(lu_gp);
+}
+
+static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
+	.make_group		= &target_core_alua_create_lu_gp,
+	.drop_item		= &target_core_alua_drop_lu_gp,
+};
+
+static struct config_item_type target_core_alua_lu_gps_cit = {
+	.ct_item_ops		= NULL,
+	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
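+
+/*
+ * Hypothetical usage sketch (the group name is made up): mkdir(2) under
+ * core/alua/lu_gps reaches target_core_alua_create_lu_gp() above, after
+ * which an ID can be assigned and the membership read back:
+ *
+ *   mkdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
+ *   echo 1 > /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/lu_gp_id
+ *   cat /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/members
+ */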
+
+/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
+#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name,			\
+	target_core_alua_tg_pt_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name);
+
+/*
+ * alua_access_state
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	unsigned long tmp;
+	int new_state, ret;
+
+	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to do implicit ALUA on non-valid"
+			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract new ALUA access state from"
+				" %s\n", page);
+		return -EINVAL;
+	}
+	new_state = (int)tmp;
+
+	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Unable to process implicit configfs ALUA"
+			" transition while TPGS_IMPLICT_ALUA is disabled\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+					NULL, NULL, new_state, 0);
+	return (!ret) ? count : -EINVAL;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_status
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%s\n",
+		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int new_status, ret;
+
+	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to set ALUA access status on"
+			" non-valid tg_pt_gp ID: %hu\n",
+			tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract new ALUA access status"
+				" from %s\n", page);
+		return -EINVAL;
+	}
+	new_status = (int)tmp;
+
+	if ((new_status != ALUA_STATUS_NONE) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+				new_status);
+		return -EINVAL;
+	}
+
+	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_type
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_access_type(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_access_type(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_write_metadata
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+		return -EINVAL;
+	}
+
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_write_metadata:"
+			" %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
+
+/*
+ * nonop_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * trans_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * preferred
+ */
+
+static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_preferred_bit(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_preferred_bit(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
+
+/*
+ * tg_pt_gp_id
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if (!(tg_pt_gp->tg_pt_gp_valid_id))
+		return 0;
+
+	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	unsigned long tg_pt_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+			" tg_pt_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (tg_pt_gp_id > 0x0000ffff) {
+		printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", tg_pt_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+		"core/alua/tg_pt_gps/%s to ID: %hu\n",
+		config_item_name(&alua_tg_pt_gp_cg->cg_item),
+		tg_pt_gp->tg_pt_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	struct se_port *port;
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+			tg_pt_gp_mem_list) {
+		port = tg_pt_gp_mem->tg_pt;
+		tpg = port->sep_tpg;
+		lun = port->sep_lun;
+
+		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+			"/%s\n", TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			printk(KERN_WARNING "Ran out of tg_pt_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
+			tg_pt_gp_group);
+
+static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
+	&target_core_alua_tg_pt_gp_alua_access_state.attr,
+	&target_core_alua_tg_pt_gp_alua_access_status.attr,
+	&target_core_alua_tg_pt_gp_alua_access_type.attr,
+	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
+	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_preferred.attr,
+	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
+	&target_core_alua_tg_pt_gp_members.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
+	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
+	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+static struct config_group *target_core_alua_create_tg_pt_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua *alua = container_of(group, struct t10_alua,
+					alua_tg_pt_gps_group);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
+	struct config_group *alua_tg_pt_gp_cg = NULL;
+	struct config_item *alua_tg_pt_gp_ci = NULL;
+
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+	if (!(tg_pt_gp))
+		return NULL;
+
+	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_tg_pt_gp_cg, name,
+			&target_core_alua_tg_pt_gp_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s\n",
+		config_item_name(alua_tg_pt_gp_ci));
+
+	return alua_tg_pt_gp_cg;
+}
+
+static void target_core_alua_drop_tg_pt_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
+		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
+
+	config_item_put(item);
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
+static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+	.make_group		= &target_core_alua_create_tg_pt_gp,
+	.drop_item		= &target_core_alua_drop_tg_pt_gp,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gps_cit = {
+	.ct_group_ops		= &target_core_alua_tg_pt_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
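+
+/*
+ * Hypothetical usage sketch (names are made up): target port groups are
+ * created per device under the "alua" group attached in
+ * target_core_make_subdev() below, which reaches
+ * target_core_alua_create_tg_pt_gp() above:
+ *
+ *   mkdir /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp
+ *   echo 16 > /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp/tg_pt_gp_id
+ */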
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_cit */
+
+/*
+ * target_core_alua_cit is a ConfigFS group that lives under
+ * /sys/kernel/config/target/core/alua.  There are default groups
+ * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
+ * target_core_alua_cit in target_core_init_configfs() below.
+ */
+static struct config_item_type target_core_alua_cit = {
+	.ct_item_ops		= NULL,
+	.ct_attrs		= NULL,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_cit */
+
+/* Start functions for struct config_item_type target_core_hba_cit */
+
+static struct config_group *target_core_make_subdev(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *se_dev;
+	struct se_subsystem_api *t;
+	struct config_item *hba_ci = &group->cg_item;
+	struct se_hba *hba = item_to_hba(hba_ci);
+	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+
+	if (mutex_lock_interruptible(&hba->hba_access_mutex))
+		return NULL;
+
+	/*
+	 * Locate the struct se_subsystem_api from parent's struct se_hba.
+	 */
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!se_dev) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		goto unlock;
+	}
+	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_reservation.registration_lock);
+	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+
+	se_dev->se_dev_hba = hba;
+	dev_cg = &se_dev->se_dev_group;
+
+	dev_cg->default_groups = kzalloc(sizeof(struct config_group *) * 6,
+			GFP_KERNEL);
+	if (!(dev_cg->default_groups))
+		goto out;
+	/*
+	 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
+	 * for ->allocate_virtdevice()
+	 *
+	 * se_dev->se_dev_ptr will be set after ->create_virtdevice()
+	 * has been called successfully in the next level up in the
+	 * configfs tree for device object's struct config_group.
+	 */
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		goto out;
+	}
+	spin_lock(&se_global->g_device_lock);
+	list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
+	spin_unlock(&se_global->g_device_lock);
+
+	config_group_init_type_name(&se_dev->se_dev_group, name,
+			&target_core_dev_cit);
+	config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+			&target_core_dev_attrib_cit);
+	config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+			&target_core_dev_pr_cit);
+	config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+			&target_core_dev_wwn_cit);
+	config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+			"alua", &target_core_alua_tg_pt_gps_cit);
+	dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
+	dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
+	dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
+	dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	dev_cg->default_groups[4] = NULL;
+	/*
+	 * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp
+	 */
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+	if (!(tg_pt_gp))
+		goto out;
+
+	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+	tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!(tg_pt_gp_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+				"default_groups\n");
+		goto out;
+	}
+
+	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
+			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
+	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
+	tg_pt_gp_cg->default_groups[1] = NULL;
+	T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+		" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+
+	mutex_unlock(&hba->hba_access_mutex);
+	return &se_dev->se_dev_group;
+out:
+	if (T10_ALUA(se_dev)->default_tg_pt_gp) {
+		core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+		T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+	}
+	if (tg_pt_gp_cg)
+		kfree(tg_pt_gp_cg->default_groups);
+	if (dev_cg)
+		kfree(dev_cg->default_groups);
+	if (se_dev->se_dev_su_ptr)
+		t->free_device(se_dev->se_dev_su_ptr);
+	kfree(se_dev);
+unlock:
+	mutex_unlock(&hba->hba_access_mutex);
+	return NULL;
+}
+
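+/*
+ * configfs ->drop_item() callback for rmdir of core/$HBA/$DEV: drops the
+ * default config groups, releases the default ALUA target port group, and
+ * frees the struct se_subsystem_dev via se_free_virtual_device() when the
+ * device was enabled, or via ->free_device() when only se_dev_su_ptr exists.
+ */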
+static void target_core_drop_subdev(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct se_hba *hba;
+	struct se_subsystem_api *t;
+	struct config_item *df_item;
+	struct config_group *dev_cg, *tg_pt_gp_cg;
+	int i, ret;
+
+	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+
+	if (mutex_lock_interruptible(&hba->hba_access_mutex))
+		goto out;
+
+	t = hba->transport;
+
+	spin_lock(&se_global->g_device_lock);
+	list_del(&se_dev->g_se_dev_list);
+	spin_unlock(&se_global->g_device_lock);
+
+	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
+		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
+		tg_pt_gp_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(tg_pt_gp_cg->default_groups);
+	core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+	T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+
+	dev_cg = &se_dev->se_dev_group;
+	for (i = 0; dev_cg->default_groups[i]; i++) {
+		df_item = &dev_cg->default_groups[i]->cg_item;
+		dev_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+	/*
+	 * This pointer will be set when the storage is enabled with:
+	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+	 */
+	if (se_dev->se_dev_ptr) {
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+			"virtual_device() for se_dev_ptr: %p\n",
+				se_dev->se_dev_ptr);
+
+		ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
+		if (ret < 0)
+			goto hba_out;
+	} else {
+		/*
+		 * Release struct se_subsystem_dev->se_dev_su_ptr.
+		 */
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+			"device() for se_dev_su_ptr: %p\n",
+			se_dev->se_dev_su_ptr);
+
+		t->free_device(se_dev->se_dev_su_ptr);
+	}
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating struct"
+		" se_subsystem_dev: %p\n", se_dev);
+
+hba_out:
+	mutex_unlock(&hba->hba_access_mutex);
+out:
+	kfree(se_dev);
+}
+
+static struct configfs_group_operations target_core_hba_group_ops = {
+	.make_group		= target_core_make_subdev,
+	.drop_item		= target_core_drop_subdev,
+};
+
+CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
+#define SE_HBA_ATTR(_name, _mode)				\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR(_name, _mode,			\
+		target_core_hba_show_attr_##_name,		\
+		target_core_hba_store_attr_##_name);
+
+#define SE_HBA_ATTR_RO(_name)					\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR_RO(_name,			\
+		target_core_hba_show_attr_##_name);
+
+static ssize_t target_core_hba_show_attr_hba_info(
+	struct se_hba *hba,
+	char *page)
+{
+	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
+			hba->hba_id, hba->transport->name,
+			TARGET_CORE_CONFIGFS_VERSION);
+}
+
+SE_HBA_ATTR_RO(hba_info);
+
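+/*
+ * The hba_mode attribute reports HBA_FLAGS_PSCSI_MODE as 0/1.  Storing a
+ * value calls the subsystem plugin's ->pmode_enable_hba(), which is only
+ * permitted while no devices are active on the HBA.
+ */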
+static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
+				char *page)
+{
+	int hba_mode = 0;
+
+	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
+		hba_mode = 1;
+
+	return sprintf(page, "%d\n", hba_mode);
+}
+
+static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
+				const char *page, size_t count)
+{
+	struct se_subsystem_api *transport = hba->transport;
+	unsigned long mode_flag;
+	int ret;
+
+	if (transport->pmode_enable_hba == NULL)
+		return -EINVAL;
+
+	ret = strict_strtoul(page, 0, &mode_flag);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+		return -EINVAL;
+	}
+
+	spin_lock(&hba->device_lock);
+	if (!(list_empty(&hba->hba_dev_list))) {
+		printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+		spin_unlock(&hba->device_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&hba->device_lock);
+
+	ret = transport->pmode_enable_hba(hba, mode_flag);
+	if (ret < 0)
+		return -EINVAL;
+	if (ret > 0)
+		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+	else if (ret == 0)
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+
+	return count;
+}
+
+SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+
+static struct configfs_attribute *target_core_hba_attrs[] = {
+	&target_core_hba_hba_info.attr,
+	&target_core_hba_hba_mode.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_hba_item_ops = {
+	.show_attribute		= target_core_hba_attr_show,
+	.store_attribute	= target_core_hba_attr_store,
+};
+
+static struct config_item_type target_core_hba_cit = {
+	.ct_item_ops		= &target_core_hba_item_ops,
+	.ct_group_ops		= &target_core_hba_group_ops,
+	.ct_attrs		= target_core_hba_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
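+/*
+ * configfs ->make_group() callback for mkdir under
+ * /sys/kernel/config/target/core.  The group name encodes
+ * $SUBSYSTEM_PLUGIN_$HOST_ID (e.g. "rd_mcp_1"); it is split below into
+ * the plugin string and the plugin dependent ID that are passed to
+ * core_alloc_hba().
+ */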
+static struct config_group *target_core_call_addhbatotarget(
+	struct config_group *group,
+	const char *name)
+{
+	char *se_plugin_str, *str, *str2;
+	struct se_hba *hba;
+	char buf[TARGET_CORE_NAME_MAX_LEN];
+	unsigned long plugin_dep_id = 0;
+	int ret;
+
+	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
+	/* >= leaves room for the terminating NUL in buf[] below */
+	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
+		printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
+			TARGET_CORE_NAME_MAX_LEN);
+		return ERR_PTR(-ENAMETOOLONG);
+	}
+	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
+
+	str = strstr(buf, "_");
+	if (!(str)) {
+		printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+		return ERR_PTR(-EINVAL);
+	}
+	se_plugin_str = buf;
+	/*
+	 * Special case for subsystem plugins that have "_" in their names,
+	 * namely rd_direct and rd_mcp.
+	 */
+	str2 = strstr(str+1, "_");
+	if ((str2)) {
+		*str2 = '\0'; /* Terminate for *se_plugin_str */
+		str2++; /* Skip to start of plugin dependent ID */
+		str = str2;
+	} else {
+		*str = '\0'; /* Terminate for *se_plugin_str */
+		str++; /* Skip to start of plugin dependent ID */
+	}
+
+	ret = strict_strtoul(str, 0, &plugin_dep_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+				" plugin_dep_id\n", ret);
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * Load up TCM subsystem plugins if they have not already been loaded.
+	 */
+	if (transport_subsystem_check_init() < 0)
+		return ERR_PTR(-EINVAL);
+
+	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
+	if (IS_ERR(hba))
+		return ERR_CAST(hba);
+
+	config_group_init_type_name(&hba->hba_group, name,
+			&target_core_hba_cit);
+
+	return &hba->hba_group;
+}
+
+static void target_core_call_delhbafromtarget(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_hba *hba = item_to_hba(item);
+
+	config_item_put(item);
+	core_delete_hba(hba);
+}
+
+static struct configfs_group_operations target_core_group_ops = {
+	.make_group	= target_core_call_addhbatotarget,
+	.drop_item	= target_core_call_delhbafromtarget,
+};
+
+static struct config_item_type target_core_cit = {
+	.ct_item_ops	= NULL,
+	.ct_group_ops	= &target_core_group_ops,
+	.ct_attrs	= NULL,
+	.ct_owner	= THIS_MODULE,
+};
+
+/* Stop functions for struct config_item_type target_core_hba_cit */
+
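+/*
+ * Module init: builds the default configfs layout
+ *
+ *   /sys/kernel/config/target/core/alua/lu_gps/default_lu_gp
+ *
+ * registers the target_core_mod subsystem, sets up the built-in RAMDISK
+ * backed virtual LUN 0 and the scsi_target proc/MIB entries, and unwinds
+ * all of it in reverse on failure.
+ */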
+static int target_core_init_configfs(void)
+{
+	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
+	struct config_group *lu_gp_cg = NULL;
+	struct configfs_subsystem *subsys;
+	struct proc_dir_entry *scsi_target_proc = NULL;
+	struct t10_alua_lu_gp *lu_gp;
+	int ret;
+
+	printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
+		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
+
+	subsys = target_core_subsystem[0];
+	config_group_init(&subsys->su_group);
+	mutex_init(&subsys->su_mutex);
+
+	INIT_LIST_HEAD(&g_tf_list);
+	mutex_init(&g_tf_lock);
+	init_scsi_index_table();
+	ret = init_se_global();
+	if (ret < 0)
+		return -1;
+	/*
+	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
+	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
+	 */
+	target_cg = &subsys->su_group;
+	target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!(target_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&se_global->target_core_hbagroup,
+			"core", &target_core_cit);
+	target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+	target_cg->default_groups[1] = NULL;
+	/*
+	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
+	 */
+	hba_cg = &se_global->target_core_hbagroup;
+	hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!(hba_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+		goto out_global;
+	}
+	config_group_init_type_name(&se_global->alua_group,
+			"alua", &target_core_alua_cit);
+	hba_cg->default_groups[0] = &se_global->alua_group;
+	hba_cg->default_groups[1] = NULL;
+	/*
+	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
+	 * groups under /sys/kernel/config/target/core/alua/
+	 */
+	alua_cg = &se_global->alua_group;
+	alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+			GFP_KERNEL);
+	if (!(alua_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&se_global->alua_lu_gps_group,
+			"lu_gps", &target_core_alua_lu_gps_cit);
+	alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+	alua_cg->default_groups[1] = NULL;
+	/*
+	 * Add core/alua/lu_gps/default_lu_gp
+	 */
+	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+	if (IS_ERR(lu_gp))
+		goto out_global;
+
+	lu_gp_cg = &se_global->alua_lu_gps_group;
+	lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+			GFP_KERNEL);
+	if (!(lu_gp_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
+				&target_core_alua_lu_gp_cit);
+	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
+	lu_gp_cg->default_groups[1] = NULL;
+	se_global->default_lu_gp = lu_gp;
+	/*
+	 * Register the target_core_mod subsystem with configfs.
+	 */
+	ret = configfs_register_subsystem(subsys);
+	if (ret < 0) {
+		printk(KERN_ERR "Error %d while registering subsystem %s\n",
+			ret, subsys->su_group.cg_item.ci_namebuf);
+		goto out_global;
+	}
+	printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+		" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
+	/*
+	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
+	 */
+	ret = rd_module_init();
+	if (ret < 0)
+		goto out;
+
+	if (core_dev_setup_virtual_lun0() < 0)
+		goto out;
+
+	scsi_target_proc = proc_mkdir("scsi_target", 0);
+	if (!(scsi_target_proc)) {
+		printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
+		goto out;
+	}
+	ret = init_scsi_target_mib();
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	configfs_unregister_subsystem(subsys);
+	if (scsi_target_proc)
+		remove_proc_entry("scsi_target", 0);
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+out_global:
+	if (se_global->default_lu_gp) {
+		core_alua_free_lu_gp(se_global->default_lu_gp);
+		se_global->default_lu_gp = NULL;
+	}
+	if (lu_gp_cg)
+		kfree(lu_gp_cg->default_groups);
+	if (alua_cg)
+		kfree(alua_cg->default_groups);
+	if (hba_cg)
+		kfree(hba_cg->default_groups);
+	kfree(target_cg->default_groups);
+	release_se_global();
+	return -1;
+}
+
+static void target_core_exit_configfs(void)
+{
+	struct configfs_subsystem *subsys;
+	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
+	struct config_item *item;
+	int i;
+
+	se_global->in_shutdown = 1;
+	subsys = target_core_subsystem[0];
+
+	lu_gp_cg = &se_global->alua_lu_gps_group;
+	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
+		item = &lu_gp_cg->default_groups[i]->cg_item;
+		lu_gp_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(lu_gp_cg->default_groups);
+	core_alua_free_lu_gp(se_global->default_lu_gp);
+	se_global->default_lu_gp = NULL;
+
+	alua_cg = &se_global->alua_group;
+	for (i = 0; alua_cg->default_groups[i]; i++) {
+		item = &alua_cg->default_groups[i]->cg_item;
+		alua_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(alua_cg->default_groups);
+
+	hba_cg = &se_global->target_core_hbagroup;
+	for (i = 0; hba_cg->default_groups[i]; i++) {
+		item = &hba_cg->default_groups[i]->cg_item;
+		hba_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(hba_cg->default_groups);
+
+	for (i = 0; subsys->su_group.default_groups[i]; i++) {
+		item = &subsys->su_group.default_groups[i]->cg_item;
+		subsys->su_group.default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(subsys->su_group.default_groups);
+
+	configfs_unregister_subsystem(subsys);
+	printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+			" Infrastructure\n");
+
+	remove_scsi_target_mib();
+	remove_proc_entry("scsi_target", 0);
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+	release_se_global();
+
+	return;
+}
+
+MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(target_core_init_configfs);
+module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 0000000..317ce58
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1694 @@
+/*******************************************************************************
+ * Filename:  target_core_device.c (based on iscsi_target_device.c)
+ *
+ * This file contains the Virtual Device and Disk Transport
+ * agnostic functions for the generic target engine.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+static void se_dev_start(struct se_device *dev);
+static void se_dev_stop(struct se_device *dev);
+
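+/*
+ * Resolve the fabric's unpacked LUN to a struct se_lun through the
+ * initiator node ACL's device_list[], enforcing READ_ONLY access for
+ * DMA_TO_DEVICE payloads.  When no MappedLUN=0 exists, LUN 0 access
+ * falls back to the write protected se_portal_group->tpg_virt_lun0 so
+ * that REPORT_LUNS et al still succeed.
+ */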
+int transport_get_lun_for_cmd(
+	struct se_cmd *se_cmd,
+	unsigned char *cdb,
+	u32 unpacked_lun)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	unsigned long flags;
+	int read_only = 0;
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	deve = se_cmd->se_deve =
+			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		if (se_cmd) {
+			deve->total_cmds++;
+			deve->total_bytes += se_cmd->data_length;
+
+			if (se_cmd->data_direction == DMA_TO_DEVICE) {
+				if (deve->lun_flags &
+						TRANSPORT_LUNFLAGS_READ_ONLY) {
+					read_only = 1;
+					goto out;
+				}
+				deve->write_bytes += se_cmd->data_length;
+			} else if (se_cmd->data_direction ==
+				   DMA_FROM_DEVICE) {
+				deve->read_bytes += se_cmd->data_length;
+			}
+		}
+		deve->deve_cmds++;
+
+		se_lun = se_cmd->se_lun = deve->se_lun;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+	}
+out:
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	if (!se_lun) {
+		if (read_only) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			printk(KERN_INFO "TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08x\n",
+				CMD_TFO(se_cmd)->get_fabric_name(),
+				unpacked_lun);
+			return -1;
+		} else {
+			/*
+			 * Use the se_portal_group->tpg_virt_lun0 to allow for
+			 * REPORT_LUNS, et al to be returned when no active
+			 * MappedLUN=0 exists for this Initiator Port.
+			 */
+			if (unpacked_lun != 0) {
+				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+					" Access for 0x%08x\n",
+					CMD_TFO(se_cmd)->get_fabric_name(),
+					unpacked_lun);
+				return -1;
+			}
+			/*
+			 * Force WRITE PROTECT for virtual LUN 0
+			 */
+			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+			    (se_cmd->data_direction != DMA_NONE)) {
+				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				return -1;
+			}
+#if 0
+			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
+				CMD_TFO(se_cmd)->get_fabric_name());
+#endif
+			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+			se_cmd->orig_fe_lun = 0;
+			se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+		}
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+
+	{
+	struct se_device *dev = se_lun->lun_se_dev;
+	spin_lock(&dev->stats_lock);
+	dev->num_cmds++;
+	if (se_cmd->data_direction == DMA_TO_DEVICE)
+		dev->write_bytes += se_cmd->data_length;
+	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+		dev->read_bytes += se_cmd->data_length;
+	spin_unlock(&dev->stats_lock);
+	}
+
+	/*
+	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list
+	 * is used for tracking the state of struct se_cmds during LUN
+	 * shutdown events.
+	 */
+	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
+	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
+	atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
+#if 0
+	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
+		CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
+#endif
+	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_cmd);
+
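+/*
+ * TMR variant of the LUN lookup above: resolves unpacked_lun for a task
+ * management request and queues the struct se_tmr_req on the device's
+ * dev_tmr_list.  No virtual LUN 0 fallback is provided here.
+ */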
+int transport_get_lun_for_tmr(
+	struct se_cmd *se_cmd,
+	u32 unpacked_lun)
+{
+	struct se_device *dev = NULL;
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	deve = se_cmd->se_deve =
+			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
+		dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+	}
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	if (!se_lun) {
+		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+			" Access for 0x%08x\n",
+			CMD_TFO(se_cmd)->get_fabric_name(),
+			unpacked_lun);
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+
+	spin_lock(&dev->se_tmr_lock);
+	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
+	spin_unlock(&dev->se_tmr_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_tmr);
+
+/*
+ * This function is called from core_scsi3_emulate_pro_register_and_move()
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * when a matching rtpi is found.
+ */
+struct se_dev_entry *core_get_se_deve_from_rtpi(
+	struct se_node_acl *nacl,
+	u16 rtpi)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_port *port;
+	struct se_portal_group *tpg = nacl->se_tpg;
+	u32 i;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		lun = deve->se_lun;
+		if (!(lun)) {
+			printk(KERN_ERR "%s device entry's se_lun pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		port = lun->lun_sep;
+		if (!(port)) {
+			printk(KERN_ERR "%s device entry's se_port pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		if (port->sep_rtpi != rtpi)
+			continue;
+
+		atomic_inc(&deve->pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		return deve;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	return NULL;
+}
+
+int core_free_device_list_for_node(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	u32 i;
+
+	if (!nacl->device_list)
+		return 0;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			printk(KERN_ERR "%s device entry's se_lun pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		lun = deve->se_lun;
+
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+		spin_lock_irq(&nacl->device_list_lock);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	kfree(nacl->device_list);
+	nacl->device_list = NULL;
+
+	return 0;
+}
+
+void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
+{
+	struct se_dev_entry *deve;
+
+	spin_lock_irq(&se_nacl->device_list_lock);
+	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
+	deve->deve_cmds--;
+	spin_unlock_irq(&se_nacl->device_list_lock);
+
+	return;
+}
+
+void core_update_device_list_access(
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[mapped_lun];
+	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+	} else {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	return;
+}
+
+/*      core_update_device_list_for_node():
+ *
+ *	Enable or disable a MappedLUN -> struct se_dev_entry mapping for
+ *	the given node ACL, including the demo mode -> explicit LUN ACL
+ *	transition for an existing entry.
+ */
+int core_update_device_list_for_node(
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl,
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg,
+	int enable)
+{
+	struct se_port *port = lun->lun_sep;
+	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+	int trans = 0;
+	/*
+	 * If the MappedLUN entry is being disabled, the entry in
+	 * port->sep_alua_list must be removed now before clearing the
+	 * struct se_dev_entry pointers below as logic in
+	 * core_alua_do_transition_tg_pt() depends on these being present.
+	 */
+	if (!(enable)) {
+		/*
+		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
+		 * that have not been explicitly converted to MappedLUNs ->
+		 * struct se_lun_acl.
+		 */
+		if (!(deve->se_lun_acl))
+			return 0;
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_del(&deve->alua_port_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+	}
+
+	spin_lock_irq(&nacl->device_list_lock);
+	if (enable) {
+		/*
+		 * Check if the call is handling the demo mode -> explicit LUN
+		 * ACL transition.  This transition must be for the same struct
+		 * se_lun + mapped_lun that was set up in demo mode.
+		 */
+		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+			if (deve->se_lun_acl != NULL) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+					" already set for demo mode -> explicit"
+					" LUN ACL transition\n");
+				return -1;
+			}
+			if (deve->se_lun != lun) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun does"
+					" not match passed struct se_lun for demo"
+					" mode -> explicit LUN ACL transition\n");
+				return -1;
+			}
+			deve->se_lun_acl = lun_acl;
+			trans = 1;
+		} else {
+			deve->se_lun = lun;
+			deve->se_lun_acl = lun_acl;
+			deve->mapped_lun = mapped_lun;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+		}
+
+		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+		}
+
+		if (trans) {
+			spin_unlock_irq(&nacl->device_list_lock);
+			return 0;
+		}
+		deve->creation_time = get_jiffies_64();
+		deve->attach_count++;
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		return 0;
+	}
+	/*
+	 * Wait for any in-flight SPEC_I_PT=1 or REGISTER_AND_MOVE
+	 * PR operation to complete.
+	 */
+	spin_unlock_irq(&nacl->device_list_lock);
+	while (atomic_read(&deve->pr_ref_count) != 0)
+		cpu_relax();
+	spin_lock_irq(&nacl->device_list_lock);
+	/*
+	 * Disable struct se_dev_entry LUN ACL mapping
+	 */
+	core_scsi3_ua_release_all(deve);
+	deve->se_lun = NULL;
+	deve->se_lun_acl = NULL;
+	deve->lun_flags = 0;
+	deve->creation_time = 0;
+	deve->attach_count--;
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
+	return 0;
+}
+
+/*      core_clear_lun_from_tpg():
+ *
+ *	Walk every node ACL in the TPG and disable any struct se_dev_entry
+ *	that still maps the struct se_lun being removed.
+ */
+void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
+{
+	struct se_node_acl *nacl;
+	struct se_dev_entry *deve;
+	u32 i;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
+		spin_unlock_bh(&tpg->acl_node_lock);
+
+		spin_lock_irq(&nacl->device_list_lock);
+		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+			deve = &nacl->device_list[i];
+			if (lun != deve->se_lun)
+				continue;
+			spin_unlock_irq(&nacl->device_list_lock);
+
+			core_update_device_list_for_node(lun, NULL,
+				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
+				nacl, tpg, 0);
+
+			spin_lock_irq(&nacl->device_list_lock);
+		}
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&tpg->acl_node_lock);
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return;
+}
+
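+/*
+ * Allocate a struct se_port and assign the next free non-zero RELATIVE
+ * TARGET PORT IDENTIFIER, rescanning dev->dev_sep_list on 16-bit counter
+ * wrap so the identifier stays unique per struct se_device.
+ */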
+static struct se_port *core_alloc_port(struct se_device *dev)
+{
+	struct se_port *port, *port_tmp;
+
+	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
+	if (!(port)) {
+		printk(KERN_ERR "Unable to allocate struct se_port\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&port->sep_alua_list);
+	INIT_LIST_HEAD(&port->sep_list);
+	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+	spin_lock_init(&port->sep_alua_lock);
+	mutex_init(&port->sep_tg_pt_md_mutex);
+
+	spin_lock(&dev->se_port_lock);
+	if (dev->dev_port_count == 0x0000ffff) {
+		printk(KERN_WARNING "Reached dev->dev_port_count =="
+				" 0x0000ffff\n");
+		spin_unlock(&dev->se_port_lock);
+		return NULL;
+	}
+again:
+	/*
+	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
+	 * struct se_device.  Here is the table from spc4r17 section 7.7.3.8:
+	 *
+	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
+	 *
+	 * Code      Description
+	 * 0h        Reserved
+	 * 1h        Relative port 1, historically known as port A
+	 * 2h        Relative port 2, historically known as port B
+	 * 3h to FFFFh    Relative port 3 through 65 535
+	 */
+	port->sep_rtpi = dev->dev_rpti_counter++;
+	if (!(port->sep_rtpi))
+		goto again;
+
+	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+		/*
+		 * Make sure the RELATIVE TARGET PORT IDENTIFIER is unique
+		 * across a 16-bit counter wrap.
+		 */
+		if (port->sep_rtpi == port_tmp->sep_rtpi)
+			goto again;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return port;
+}
+
+static void core_export_port(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_port *port,
+	struct se_lun *lun)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
+
+	spin_lock(&dev->se_port_lock);
+	spin_lock(&lun->lun_sep_lock);
+	port->sep_tpg = tpg;
+	port->sep_lun = lun;
+	lun->lun_sep = port;
+	spin_unlock(&lun->lun_sep_lock);
+
+	list_add_tail(&port->sep_list, &dev->dev_sep_list);
+	spin_unlock(&dev->se_port_lock);
+
+	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
+		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
+			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+					"_gp_member_t\n");
+			return;
+		}
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+			T10_ALUA(su_dev)->default_tg_pt_gp);
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+			" Group: alua/default_tg_pt_gp\n",
+			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+	}
+
+	dev->dev_port_count++;
+	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
+}
+
+/*
+ *	Called with struct se_device->se_port_lock spinlock held.
+ */
+static void core_release_port(struct se_device *dev, struct se_port *port)
+{
+	/*
+	 * Wait for any port reference for PR ALL_TG_PT=1 operation
+	 * to complete in __core_scsi3_alloc_registration()
+	 */
+	spin_unlock(&dev->se_port_lock);
+	while (atomic_read(&port->sep_tg_pt_ref_cnt))
+		cpu_relax();
+	spin_lock(&dev->se_port_lock);
+
+	core_alua_free_tg_pt_gp_mem(port);
+
+	list_del(&port->sep_list);
+	dev->dev_port_count--;
+	kfree(port);
+
+	return;
+}
+
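+/*
+ * Export a struct se_device as a fabric LUN: allocate a struct se_port,
+ * start the device and take a dev_export_obj reference, then wire up
+ * port <-> lun <-> tpg in core_export_port().
+ */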
+int core_dev_export(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port;
+
+	port = core_alloc_port(dev);
+	if (!(port))
+		return -1;
+
+	lun->lun_se_dev = dev;
+	se_dev_start(dev);
+
+	atomic_inc(&dev->dev_export_obj.obj_access_count);
+	core_export_port(dev, tpg, port, lun);
+	return 0;
+}
+
+void core_dev_unexport(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port = lun->lun_sep;
+
+	spin_lock(&lun->lun_sep_lock);
+	if (lun->lun_se_dev == NULL) {
+		spin_unlock(&lun->lun_sep_lock);
+		return;
+	}
+	spin_unlock(&lun->lun_sep_lock);
+
+	spin_lock(&dev->se_port_lock);
+	atomic_dec(&dev->dev_export_obj.obj_access_count);
+	core_release_port(dev, port);
+	spin_unlock(&dev->se_port_lock);
+
+	se_dev_stop(dev);
+	lun->lun_se_dev = NULL;
+}
+
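+/*
+ * Emulate the SCSI REPORT LUNS response: emit one 8-byte LUN entry per
+ * mapped LUN in the initiator node's device_list[] (or LUN=0 only for
+ * session-less passthrough ops), then fill in the 4-byte LUN LIST LENGTH
+ * header as lun_count * 8 per SPC.
+ */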
+int transport_core_report_lun_response(struct se_cmd *se_cmd)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	struct se_task *se_task;
+	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+	u32 cdb_offset = 0, lun_count = 0, offset = 8;
+	u64 i, lun;
+
+	/*
+	 * list_for_each_entry() never leaves the cursor NULL, so check for
+	 * an empty list explicitly before taking the first struct se_task.
+	 */
+	if (list_empty(&T_TASK(se_cmd)->t_task_list)) {
+		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	se_task = list_first_entry(&T_TASK(se_cmd)->t_task_list,
+				struct se_task, t_list);
+
+	/*
+	 * If no struct se_session pointer is present, this struct se_cmd is
+	 * coming via a target_core_mod PASSTHROUGH op, and not through
+	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
+	 */
+	if (!(se_sess)) {
+		lun = 0;
+		buf[offset++] = ((lun >> 56) & 0xff);
+		buf[offset++] = ((lun >> 48) & 0xff);
+		buf[offset++] = ((lun >> 40) & 0xff);
+		buf[offset++] = ((lun >> 32) & 0xff);
+		buf[offset++] = ((lun >> 24) & 0xff);
+		buf[offset++] = ((lun >> 16) & 0xff);
+		buf[offset++] = ((lun >> 8) & 0xff);
+		buf[offset++] = (lun & 0xff);
+		lun_count = 1;
+		goto done;
+	}
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &SE_NODE_ACL(se_sess)->device_list[i];
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+		se_lun = deve->se_lun;
+		/*
+		 * Keep counting so the LUN LIST LENGTH reflects every mapped
+		 * LUN, even once the initial allocation length is exhausted.
+		 * See SPC2-R20 7.19.
+		 */
+		lun_count++;
+		if ((cdb_offset + 8) >= se_cmd->data_length)
+			continue;
+
+		lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
+		buf[offset++] = ((lun >> 56) & 0xff);
+		buf[offset++] = ((lun >> 48) & 0xff);
+		buf[offset++] = ((lun >> 40) & 0xff);
+		buf[offset++] = ((lun >> 32) & 0xff);
+		buf[offset++] = ((lun >> 24) & 0xff);
+		buf[offset++] = ((lun >> 16) & 0xff);
+		buf[offset++] = ((lun >> 8) & 0xff);
+		buf[offset++] = (lun & 0xff);
+		cdb_offset += 8;
+	}
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	/*
+	 * See SPC3 r07, page 159.
+	 */
+done:
+	lun_count *= 8;
+	buf[0] = ((lun_count >> 24) & 0xff);
+	buf[1] = ((lun_count >> 16) & 0xff);
+	buf[2] = ((lun_count >> 8) & 0xff);
+	buf[3] = (lun_count & 0xff);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	se_release_device_for_hba():
+ *
+ *	Stop the device if it is still active, detach it from its HBA,
+ *	release all PR registrations and VPD data, and free the struct
+ *	se_device itself.
+ */
+void se_release_device_for_hba(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
+		se_dev_stop(dev);
+
+	if (dev->dev_ptr) {
+		kthread_stop(dev->process_thread);
+		if (dev->transport->free_device)
+			dev->transport->free_device(dev->dev_ptr);
+	}
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	core_scsi3_free_all_registrations(dev);
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev->dev_status_queue_obj);
+	kfree(dev->dev_queue_obj);
+	kfree(dev);
+
+	return;
+}
+
+void se_release_vpd_for_dev(struct se_device *dev)
+{
+	struct t10_vpd *vpd, *vpd_tmp;
+
+	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+	list_for_each_entry_safe(vpd, vpd_tmp,
+			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+		list_del(&vpd->vpd_list);
+		kfree(vpd);
+	}
+	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+
+	return;
+}
+
+/*
+ * Called with struct se_hba->device_lock held.
+ */
+void se_clear_dev_ports(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	struct se_port *sep, *sep_tmp;
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+		spin_unlock(&dev->se_port_lock);
+		spin_unlock(&hba->device_lock);
+
+		lun = sep->sep_lun;
+		tpg = sep->sep_tpg;
+		spin_lock(&lun->lun_sep_lock);
+		if (lun->lun_se_dev == NULL) {
+			spin_unlock(&lun->lun_sep_lock);
+			continue;
+		}
+		spin_unlock(&lun->lun_sep_lock);
+
+		core_dev_del_lun(tpg, lun->unpacked_lun);
+
+		spin_lock(&hba->device_lock);
+		spin_lock(&dev->se_port_lock);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return;
+}
+
+/*	se_free_virtual_device():
+ *
+ *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
+ */
+int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
+{
+	spin_lock(&hba->device_lock);
+	se_clear_dev_ports(dev);
+	spin_unlock(&hba->device_lock);
+
+	core_alua_free_lu_gp_mem(dev);
+	se_release_device_for_hba(dev);
+
+	return 0;
+}
+
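+/*
+ * se_dev_start()/se_dev_stop() track dev->dev_obj.obj_access_count and
+ * move the device between the (OFFLINE_)DEACTIVATED and
+ * (OFFLINE_)ACTIVATED states on the first reference and last release.
+ */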
+static void se_dev_start(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	spin_lock(&hba->device_lock);
+	atomic_inc(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
+		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
+			dev->dev_status &=
+				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+}
+
+static void se_dev_stop(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	spin_lock(&hba->device_lock);
+	atomic_dec(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
+		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+
+	while (atomic_read(&hba->dev_mib_access_count))
+		cpu_relax();
+}
+
+int se_dev_check_online(struct se_device *dev)
+{
+	int ret;
+
+	spin_lock_irq(&dev->dev_status_lock);
+	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
+	spin_unlock_irq(&dev->dev_status_lock);
+
+	return ret;
+}
+
+int se_dev_check_shutdown(struct se_device *dev)
+{
+	int ret;
+
+	spin_lock_irq(&dev->dev_status_lock);
+	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
+	spin_unlock_irq(&dev->dev_status_lock);
+
+	return ret;
+}
+
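+/*
+ * Seed struct se_dev_attrib with the DA_* compile time defaults plus the
+ * block size, sector and queue depth limits reported by the subsystem
+ * plugin through struct se_dev_limits.
+ */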
+void se_dev_set_default_attribs(
+	struct se_device *dev,
+	struct se_dev_limits *dev_limits)
+{
+	struct queue_limits *limits = &dev_limits->limits;
+
+	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
+	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
+	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
+	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
+	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
+	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
+	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
+	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
+	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+	/*
+	 * The TPU=1 and TPWS=1 settings will be set by TCM/IBLOCK in
+	 * iblock_create_virtdevice() from the struct queue_limits values
+	 * when blk_queue_discard() == 1.
+	 */
+	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
+				DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+	DEV_ATTRIB(dev)->unmap_granularity_alignment =
+				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+	/*
+	 * block_size is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
+	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+	/*
+	 * max_sectors is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
+	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+	/*
+	 * Set optimal_sectors from max_sectors, which can be lowered via
+	 * configfs.
+	 */
+	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+	/*
+	 * queue_depth is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
+	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+}
+
+int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
+{
+	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
+		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
+			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
+		return -1;
+	} else {
+		DEV_ATTRIB(dev)->task_timeout = task_timeout;
+		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+			dev, task_timeout);
+	}
+
+	return 0;
+}
+
+int se_dev_set_max_unmap_lba_count(
+	struct se_device *dev,
+	u32 max_unmap_lba_count)
+{
+	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
+	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
+			dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+	return 0;
+}
+
+int se_dev_set_max_unmap_block_desc_count(
+	struct se_device *dev,
+	u32 max_unmap_block_desc_count)
+{
+	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
+	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
+			dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity(
+	struct se_device *dev,
+	u32 unmap_granularity)
+{
+	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
+	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
+			dev, DEV_ATTRIB(dev)->unmap_granularity);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity_alignment(
+	struct se_device *dev,
+	u32 unmap_granularity_alignment)
+{
+	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
+	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
+			dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+	return 0;
+}
+
+int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->dpo_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_dpo = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
+			" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+	return 0;
+}
+
+int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_fua_write = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_fua_write);
+	return 0;
+}
+
+int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_fua_read = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_fua_read);
+	return 0;
+}
+
+int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_write_cache = flag;
+	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_write_cache);
+	return 0;
+}
+
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1) && (flag != 2)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
+			" exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
+	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+
+	return 0;
+}
+
+int se_dev_set_emulate_tas(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_tas = flag;
+	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+
+	return 0;
+}
+
+int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+		printk(KERN_ERR "Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	DEV_ATTRIB(dev)->emulate_tpu = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+		printk(KERN_ERR "Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	DEV_ATTRIB(dev)->emulate_tpws = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
+	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+	return 0;
+}
+
+/*
+ * Note: this can only be called on an unexported SE Device Object.
+ */
+int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
+{
+	u32 orig_queue_depth = dev->queue_depth;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	if (!(queue_depth)) {
+		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+			"_depth\n", dev);
+		return -1;
+	}
+
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+				" exceeds TCM/SE_Device TCQ: %u\n",
+				dev, queue_depth,
+				DEV_ATTRIB(dev)->hw_queue_depth);
+			return -1;
+		}
+	} else {
+		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
+			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+					" %u exceeds TCM/SE_Device MAX"
+					" TCQ: %u\n", dev, queue_depth,
+					DEV_ATTRIB(dev)->hw_queue_depth);
+				return -1;
+			}
+		}
+	}
+
+	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+	if (queue_depth > orig_queue_depth)
+		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
+	else if (queue_depth < orig_queue_depth)
+		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
+
+	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+			dev, queue_depth);
+	return 0;
+}
+
+int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+{
+	int force = 0; /* Force setting for VDEVS */
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" max_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	if (!(max_sectors)) {
+		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+			" max_sectors\n", dev);
+		return -1;
+	}
+	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MIN);
+		return -1;
+	}
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors:"
+				" %u\n", dev, max_sectors,
+				DEV_ATTRIB(dev)->hw_max_sectors);
+			return -1;
+		}
+	} else {
+		if (!(force) && (max_sectors >
+				 DEV_ATTRIB(dev)->hw_max_sectors)) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors"
+				": %u, use force=1 to override.\n", dev,
+				max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
+			return -1;
+		}
+		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than DA_STATUS_MAX_SECTORS_MAX:"
+				" %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MAX);
+			return -1;
+		}
+	}
+
+	DEV_ATTRIB(dev)->max_sectors = max_sectors;
+	printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
+			dev, max_sectors);
+	return 0;
+}
+
+int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" optimal_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+				" changed for TCM/pSCSI\n", dev);
+		return -EINVAL;
+	}
+	if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
+		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+			" greater than max_sectors: %u\n", dev,
+			optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+		return -EINVAL;
+	}
+
+	DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
+	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+			dev, optimal_sectors);
+	return 0;
+}
+
+int se_dev_set_block_size(struct se_device *dev, u32 block_size)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+			" while dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+
+	if ((block_size != 512) &&
+	    (block_size != 1024) &&
+	    (block_size != 2048) &&
+	    (block_size != 4096)) {
+		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
+			" for SE device, must be 512, 1024, 2048 or 4096\n",
+			dev, block_size);
+		return -1;
+	}
+
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+			" Physical Device, use for Linux/SCSI to change"
+			" block_size for underlying hardware\n", dev);
+		return -1;
+	}
+
+	DEV_ATTRIB(dev)->block_size = block_size;
+	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+			dev, block_size);
+	return 0;
+}
+
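+/*
+ * Export a struct se_device as a new LUN under the given TPG, honoring
+ * DF_READ_ONLY for the LUN access flags, and refresh the LUN maps of any
+ * dynamically generated (demo mode) node ACLs so they see the new LUN
+ * immediately.
+ */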
+struct se_lun *core_dev_add_lun(
+	struct se_portal_group *tpg,
+	struct se_hba *hba,
+	struct se_device *dev,
+	u32 lun)
+{
+	struct se_lun *lun_p;
+	u32 lun_access = 0;
+
+	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
+		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+			atomic_read(&dev->dev_access_obj.obj_access_count));
+		return NULL;
+	}
+
+	lun_p = core_tpg_pre_addlun(tpg, lun);
+	if ((IS_ERR(lun_p)) || !(lun_p))
+		return NULL;
+
+	if (dev->dev_flags & DF_READ_ONLY)
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	else
+		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+
+	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
+		return NULL;
+
+	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
+		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+	/*
+	 * Update LUN maps for dynamically added initiators when
+	 * generate_node_acl is enabled.
+	 */
+	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+		struct se_node_acl *acl;
+		spin_lock_bh(&tpg->acl_node_lock);
+		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+			if (acl->dynamic_node_acl) {
+				spin_unlock_bh(&tpg->acl_node_lock);
+				core_tpg_add_node_to_devs(acl, tpg);
+				spin_lock_bh(&tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_bh(&tpg->acl_node_lock);
+	}
+
+	return lun_p;
+}
+
+/*      core_dev_del_lun():
+ *
+ *	Deactivate and release the struct se_lun for the given unpacked
+ *	LUN within a Target Portal Group.
+ */
+int core_dev_del_lun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+	int ret = 0;
+
+	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
+	if (!(lun))
+		return ret;
+
+	core_tpg_post_dellun(tpg, lun);
+
+	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
+		TPG_TFO(tpg)->get_fabric_name());
+
+	return 0;
+}
+
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+/*      core_dev_get_lun():
+ *
+ *	Return the struct se_lun for an unpacked LUN only when it is in
+ *	the TRANSPORT_LUN_STATUS_ACTIVE state.
+ */
+static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+			"_TPG-1: %u for Target Portal Group: %hu\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	u32 mapped_lun,
+	char *initiatorname,
+	int *ret)
+{
+	struct se_lun_acl *lacl;
+	struct se_node_acl *nacl;
+
+	if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
+			TPG_TFO(tpg)->get_fabric_name());
+		*ret = -EOVERFLOW;
+		return NULL;
+	}
+	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (!(nacl)) {
+		*ret = -EINVAL;
+		return NULL;
+	}
+	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+	if (!(lacl)) {
+		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+		*ret = -ENOMEM;
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&lacl->lacl_list);
+	lacl->mapped_lun = mapped_lun;
+	lacl->se_lun_nacl = nacl;
+	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+
+	return lacl;
+}
+
+int core_dev_add_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl,
+	u32 unpacked_lun,
+	u32 lun_access)
+{
+	struct se_lun *lun;
+	struct se_node_acl *nacl;
+
+	lun = core_dev_get_lun(tpg, unpacked_lun);
+	if (!(lun)) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return -EINVAL;
+	}
+
+	nacl = lacl->se_lun_nacl;
+	if (!(nacl))
+		return -EINVAL;
+
+	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
+	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
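+	/*
+	 * Descriptive note: a LUN exported READ-ONLY can never be mapped
+	 * READ-WRITE; a READ-WRITE request is silently demoted here.
+	 */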
+
+	lacl->se_lun = lun;
+
+	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
+			lun_access, nacl, tpg, 1) < 0)
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
+	atomic_inc(&lun->lun_acl_count);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&lun->lun_acl_lock);
+
+	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+		lacl->initiatorname);
+	/*
+	 * Check to see if there are any existing persistent reservation APTPL
+	 * pre-registrations that need to be enabled for this LUN ACL..
+	 */
+	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+	return 0;
+}
+
+/*      core_dev_del_initiator_node_lun_acl():
+ *
+ *
+ */
+int core_dev_del_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lacl)
+{
+	struct se_node_acl *nacl;
+
+	nacl = lacl->se_lun_nacl;
+	if (!(nacl))
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_del(&lacl->lacl_list);
+	atomic_dec(&lun->lun_acl_count);
+	smp_mb__after_atomic_dec();
+	spin_unlock(&lun->lun_acl_lock);
+
+	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
+		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+	lacl->se_lun = NULL;
+
+	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+		" InitiatorNode: %s Mapped LUN: %u\n",
+		TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+		lacl->initiatorname, lacl->mapped_lun);
+
+	return 0;
+}
+
+void core_dev_free_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl)
+{
+	printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		TPG_TFO(tpg)->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun);
+
+	kfree(lacl);
+}
+
+int core_dev_setup_virtual_lun0(void)
+{
+	struct se_hba *hba;
+	struct se_device *dev;
+	struct se_subsystem_dev *se_dev = NULL;
+	struct se_subsystem_api *t;
+	char buf[16];
+	int ret;
+
+	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+	if (IS_ERR(hba))
+		return PTR_ERR(hba);
+
+	se_global->g_lun0_hba = hba;
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!(se_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_reservation.registration_lock);
+	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+	se_dev->se_dev_hba = hba;
+
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	se_global->g_lun0_su_dev = se_dev;
+
+	memset(buf, 0, 16);
+	sprintf(buf, "rd_pages=8");
+	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (!(dev) || IS_ERR(dev)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	se_dev->se_dev_ptr = dev;
+	se_global->g_lun0_dev = dev;
+
+	return 0;
+out:
+	se_global->g_lun0_su_dev = NULL;
+	kfree(se_dev);
+	if (se_global->g_lun0_hba) {
+		core_delete_hba(se_global->g_lun0_hba);
+		se_global->g_lun0_hba = NULL;
+	}
+	return ret;
+}
+
+void core_dev_release_virtual_lun0(void)
+{
+	struct se_hba *hba = se_global->g_lun0_hba;
+	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+
+	if (!(hba))
+		return;
+
+	if (se_global->g_lun0_dev)
+		se_free_virtual_device(se_global->g_lun0_dev, hba);
+
+	kfree(su_dev);
+	core_delete_hba(hba);
+}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 0000000..32b148d
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,996 @@
+/*******************************************************************************
+ * Filename: target_core_fabric_configfs.c
+ *
+ * This file contains generic fabric module configfs infrastructure for
+ * TCM v4.x code
+ *
+ * Copyright (c) 2010 Rising Tide Systems
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{									\
+	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;	\
+	struct config_item_type *cit = &tfc->tfc_##_name##_cit;		\
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = _attrs;						\
+	cit->ct_owner = tf->tf_module;					\
+	printk(KERN_INFO "Setup generic %s\n", __stringify(_name));	\
+}
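+
+/*
+ * Illustrative expansion (not generated code): TF_CIT_SETUP(wwn, ...)
+ * defines target_fabric_setup_wwn_cit(), which fills in
+ * tf->tf_cit_tmpl.tfc_wwn_cit with the given item_ops, group_ops and
+ * attrs, and pins ct_owner to the registering fabric module.
+ */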
+
+/* Start of tfc_tpg_mappedlun_cit */
+
+static int target_fabric_mappedlun_link(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg;
+	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+	int ret = 0, lun_access;
+	/*
+	 * Ensure that the source port exists
+	 */
+	if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
+		printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+				"_tpg does not exist\n");
+		return -EINVAL;
+	}
+	se_tpg = lun->lun_sep->sep_tpg;
+
+	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+	tpg_ci = &nacl_ci->ci_group->cg_item;
+	wwn_ci = &tpg_ci->ci_group->cg_item;
+	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
+	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
+	/*
+	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
+	 */
+	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
+		printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+			config_item_name(wwn_ci));
+		return -EINVAL;
+	}
+	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
+		printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+			" TPGT: %s\n", config_item_name(wwn_ci),
+			config_item_name(tpg_ci));
+		return -EINVAL;
+	}
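+	/*
+	 * Illustrative example (paths assumed): a valid MappedLUN SymLink
+	 * stays inside the same $FABRIC/$WWN/tpgt_$TPGT, e.g.:
+	 *
+	 *   ln -s .../$FABRIC/$WWN/tpgt_1/lun/lun_0 \
+	 *         .../$FABRIC/$WWN/tpgt_1/acls/$INITIATOR/lun_0
+	 */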
+	/*
+	 * If this struct se_node_acl was dynamically generated with
+	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
+	 * which will be write protected (READ-ONLY) when
+	 * tpg_1/attrib/demo_mode_write_protect=1
+	 */
+	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
+	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+		lun_access = deve->lun_flags;
+	else
+		lun_access =
+			(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
+					   TRANSPORT_LUNFLAGS_READ_WRITE;
+	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+	/*
+	 * Determine the actual mapped LUN value the user wants.
+	 *
+	 * This is the value the SCSI Initiator actually sees
+	 * iscsi/$IQN/$TPGT/lun/lun_* as on its SCSI Initiator Ports.
+	 */
+	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
+			lun->unpacked_lun, lun_access);
+
+	return (ret < 0) ? -EINVAL : 0;
+}
+
+static int target_fabric_mappedlun_unlink(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_lun *lun;
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
+	struct se_portal_group *se_tpg;
+	/*
+	 * Determine if the underlying MappedLUN has already been released..
+	 */
+	if (!(deve->se_lun))
+		return 0;
+
+	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+	se_tpg = lun->lun_sep->sep_tpg;
+
+	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
+	return 0;
+}
+
+CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
+#define TCM_MAPPEDLUN_ATTR(_name, _mode)				\
+static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_mappedlun_show_##_name,				\
+	target_fabric_mappedlun_store_##_name);
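+
+/*
+ * Illustrative only: TCM_MAPPEDLUN_ATTR(write_protect, ...) below wires
+ * a "write_protect" configfs attribute to the
+ * target_fabric_mappedlun_{show,store}_write_protect() handlers.
+ */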
+
+static ssize_t target_fabric_mappedlun_show_write_protect(
+	struct se_lun_acl *lacl,
+	char *page)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t len;
+
+	spin_lock_irq(&se_nacl->device_list_lock);
+	deve = &se_nacl->device_list[lacl->mapped_lun];
+	len = sprintf(page, "%d\n",
+			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
+			1 : 0);
+	spin_unlock_irq(&se_nacl->device_list_lock);
+
+	return len;
+}
+
+static ssize_t target_fabric_mappedlun_store_write_protect(
+	struct se_lun_acl *lacl,
+	const char *page,
+	size_t count)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	unsigned long op;
+
+	if (strict_strtoul(page, 0, &op))
+		return -EINVAL;
+
+	if ((op != 1) && (op != 0))
+		return -EINVAL;
+
+	core_update_device_list_access(lacl->mapped_lun, (op) ?
+			TRANSPORT_LUNFLAGS_READ_ONLY :
+			TRANSPORT_LUNFLAGS_READ_WRITE,
+			lacl->se_lun_nacl);
+
+	printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+		" Mapped LUN: %u Write Protect bit to %s\n",
+		TPG_TFO(se_tpg)->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+
+	return count;
+
+}
+
+TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+	&target_fabric_mappedlun_write_protect.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.show_attribute		= target_fabric_mappedlun_attr_show,
+	.store_attribute	= target_fabric_mappedlun_attr_store,
+	.allow_link		= target_fabric_mappedlun_link,
+	.drop_link		= target_fabric_mappedlun_unlink,
+};
+
+TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
+		target_fabric_mappedlun_attrs);
+
+/* End of tfc_tpg_mappedlun_cit */
+
+/* Start of tfc_tpg_nacl_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
+
+static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
+	.show_attribute		= target_fabric_nacl_attrib_attr_show,
+	.store_attribute	= target_fabric_nacl_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_attrib_cit */
+
+/* Start of tfc_tpg_nacl_auth_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
+
+static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
+	.show_attribute		= target_fabric_nacl_auth_attr_show,
+	.store_attribute	= target_fabric_nacl_auth_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_auth_cit */
+
+/* Start of tfc_tpg_nacl_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
+
+static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
+	.show_attribute		= target_fabric_nacl_param_attr_show,
+	.store_attribute	= target_fabric_nacl_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_param_cit */
+
+/* Start of tfc_tpg_nacl_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
+
+static struct config_group *target_fabric_make_mappedlun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl = container_of(group,
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_lun_acl *lacl;
+	struct config_item *acl_ci;
+	char *buf;
+	unsigned long mapped_lun;
+	int ret = 0;
+
+	acl_ci = &group->cg_item;
+	if (!(acl_ci)) {
+		printk(KERN_ERR "Unable to locate acl_ci\n");
+		return NULL;
+	}
+
+	buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for name buf\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	snprintf(buf, strlen(name) + 1, "%s", name);
+	/*
+	 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
+	 */
+	if (strstr(buf, "lun_") != buf) {
+		printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+			" name: %s\n", buf, name);
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * Determine the Mapped LUN value.  This is what the SCSI Initiator
+	 * Port will actually see.
+	 */
+	if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+			config_item_name(acl_ci), &ret);
+	if (!(lacl))
+		goto out;
+
+	config_group_init_type_name(&lacl->se_lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+
+	kfree(buf);
+	return &lacl->se_lun_group;
+out:
+	kfree(buf);
+	return ERR_PTR(ret);
+}
+
+static void target_fabric_drop_mappedlun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	config_item_put(item);
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.show_attribute		= target_fabric_nacl_base_attr_show,
+	.store_attribute	= target_fabric_nacl_base_attr_store,
+};
+
+static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
+	.make_group		= target_fabric_make_mappedlun,
+	.drop_item		= target_fabric_drop_mappedlun,
+};
+
+TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+		&target_fabric_nacl_base_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_base_cit */
+
+/* Start of tfc_tpg_nacl_cit */
+
+static struct config_group *target_fabric_make_nodeacl(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_acl_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_node_acl *se_nacl;
+	struct config_group *nacl_cg;
+
+	if (!(tf->tf_ops.fabric_make_nodeacl)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+	if (IS_ERR(se_nacl))
+		return ERR_PTR(PTR_ERR(se_nacl));
+
+	nacl_cg = &se_nacl->acl_group;
+	nacl_cg->default_groups = se_nacl->acl_default_groups;
+	nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
+	nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
+	nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
+	nacl_cg->default_groups[3] = NULL;
+
+	config_group_init_type_name(&se_nacl->acl_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+	config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+	config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+	config_group_init_type_name(&se_nacl->acl_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+
+	return &se_nacl->acl_group;
+}
+
+static void target_fabric_drop_nodeacl(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_acl_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct config_item *df_item;
+	struct config_group *nacl_cg;
+	int i;
+
+	nacl_cg = &se_nacl->acl_group;
+	for (i = 0; nacl_cg->default_groups[i]; i++) {
+		df_item = &nacl_cg->default_groups[i]->cg_item;
+		nacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+}
+
+static struct configfs_group_operations target_fabric_nacl_group_ops = {
+	.make_group	= target_fabric_make_nodeacl,
+	.drop_item	= target_fabric_drop_nodeacl,
+};
+
+TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_cit */
+
+/* Start of tfc_tpg_np_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+
+static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.show_attribute		= target_fabric_np_base_attr_show,
+	.store_attribute	= target_fabric_np_base_attr_store,
+};
+
+TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_np_base_cit */
+
+/* Start of tfc_tpg_np_cit */
+
+static struct config_group *target_fabric_make_np(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+				struct se_portal_group, tpg_np_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_tpg_np *se_tpg_np;
+
+	if (!(tf->tf_ops.fabric_make_np)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+	if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+		return ERR_PTR(-EINVAL);
+
+	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+
+	return &se_tpg_np->tpg_np_group;
+}
+
+static void target_fabric_drop_np(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+				struct se_portal_group, tpg_np_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+				struct se_tpg_np, tpg_np_group);
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
+static struct configfs_group_operations target_fabric_np_group_ops = {
+	.make_group	= &target_fabric_make_np,
+	.drop_item	= &target_fabric_drop_np,
+};
+
+TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+
+/* End of tfc_tpg_np_cit */
+
+/* Start of tfc_tpg_port_cit */
+
+CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
+#define TCM_PORT_ATTR(_name, _mode)					\
+static struct target_fabric_port_attribute target_fabric_port_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_port_show_attr_##_name,				\
+	target_fabric_port_store_attr_##_name);
+
+#define TCM_PORT_ATTR_RO(_name)					\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_fabric_port_show_attr_##_name);
+
+/*
+ * alua_tg_pt_gp
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_offline
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_offline_bit(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_offline_bit(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_status
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_secondary_status(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_secondary_status(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_write_md
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_secondary_write_metadata(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_secondary_write_metadata(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *target_fabric_port_attrs[] = {
+	&target_fabric_port_alua_tg_pt_gp.attr,
+	&target_fabric_port_alua_tg_pt_offline.attr,
+	&target_fabric_port_alua_tg_pt_status.attr,
+	&target_fabric_port_alua_tg_pt_write_md.attr,
+	NULL,
+};
+
+CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
+
+static int target_fabric_port_link(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct config_item *tpg_ci;
+	struct se_device *dev;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_lun *lun_p;
+	struct se_portal_group *se_tpg;
+	struct se_subsystem_dev *se_dev = container_of(
+				to_config_group(se_dev_ci), struct se_subsystem_dev,
+				se_dev_group);
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+	se_tpg = container_of(to_config_group(tpg_ci),
+				struct se_portal_group, tpg_group);
+	tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (lun->lun_se_dev != NULL) {
+		printk(KERN_ERR "Port Symlink already exists\n");
+		return -EEXIST;
+	}
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev)) {
+		printk(KERN_ERR "Unable to locate struct se_device pointer from"
+			" %s\n", config_item_name(se_dev_ci));
+		ret = -ENODEV;
+		goto out;
+	}
+
+	lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
+				lun->unpacked_lun);
+	if ((IS_ERR(lun_p)) || !(lun_p)) {
+		printk(KERN_ERR "core_dev_add_lun() failed\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (tf->tf_ops.fabric_post_link) {
+		/*
+		 * Call the optional fabric_post_link() to allow a
+		 * fabric module to set up any additional state once
+		 * core_dev_add_lun() has been called.
+		 */
+		tf->tf_ops.fabric_post_link(se_tpg, lun);
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+static int target_fabric_port_unlink(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (tf->tf_ops.fabric_pre_unlink) {
+		/*
+		 * Call the optional fabric_pre_unlink() to allow a
+		 * fabric module to release any additional state before
+		 * core_dev_del_lun() is called.
+		 */
+		tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+	}
+
+	core_dev_del_lun(se_tpg, lun->unpacked_lun);
+	return 0;
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+	.show_attribute		= target_fabric_port_attr_show,
+	.store_attribute	= target_fabric_port_attr_store,
+	.allow_link		= target_fabric_port_link,
+	.drop_link		= target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_lun_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	unsigned long unpacked_lun;
+
+	if (strstr(name, "lun_") != name) {
+		printk(KERN_ERR "Unable to locate \"lun_\" in"
+				" \"lun_$LUN_NUMBER\"\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
+	if (!(lun))
+		return ERR_PTR(-EINVAL);
+
+	config_group_init_type_name(&lun->lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+
+	return &lun->lun_group;
+}
+
+static void target_fabric_drop_lun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+	.make_group	= &target_fabric_make_lun,
+	.drop_item	= &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+/* Start of tfc_tpg_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
+
+static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
+	.show_attribute		= target_fabric_tpg_attrib_attr_show,
+	.store_attribute	= target_fabric_tpg_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_attrib_cit */
+
+/* Start of tfc_tpg_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
+
+static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
+	.show_attribute		= target_fabric_tpg_param_attr_show,
+	.store_attribute	= target_fabric_tpg_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_param_cit */
+
+/* Start of tfc_tpg_base_cit */
+/*
+ * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.show_attribute		= target_fabric_tpg_attr_show,
+	.store_attribute	= target_fabric_tpg_attr_store,
+};
+
+TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_base_cit */
+
+/* Start of tfc_tpg_cit */
+
+static struct config_group *target_fabric_make_tpg(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+	struct se_portal_group *se_tpg;
+
+	if (!(tf->tf_ops.fabric_make_tpg)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+	if (!(se_tpg) || IS_ERR(se_tpg))
+		return ERR_PTR(-EINVAL);
+	/*
+	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
+	 */
+	se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
+	se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
+	se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
+	se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
+	se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
+	se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
+	se_tpg->tpg_group.default_groups[5] = NULL;
+
+	config_group_init_type_name(&se_tpg->tpg_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
+			&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+	config_group_init_type_name(&se_tpg->tpg_np_group, "np",
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+
+	return &se_tpg->tpg_group;
+}
+
+static void target_fabric_drop_tpg(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+				struct se_portal_group, tpg_group);
+	struct config_group *tpg_cg = &se_tpg->tpg_group;
+	struct config_item *df_item;
+	int i;
+	/*
+	 * Release default groups, but do not release tpg_cg->default_groups
+	 * memory as it is statically allocated at se_tpg->tpg_default_groups.
+	 */
+	for (i = 0; tpg_cg->default_groups[i]; i++) {
+		df_item = &tpg_cg->default_groups[i]->cg_item;
+		tpg_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
+static struct configfs_group_operations target_fabric_tpg_group_ops = {
+	.make_group	= target_fabric_make_tpg,
+	.drop_item	= target_fabric_drop_tpg,
+};
+
+TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+
+/* End of tfc_tpg_cit */
+
+/* Start of tfc_wwn_cit */
+
+static struct config_group *target_fabric_make_wwn(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf = container_of(group,
+				struct target_fabric_configfs, tf_group);
+	struct se_wwn *wwn;
+
+	if (!(tf->tf_ops.fabric_make_wwn)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+	if (!(wwn) || IS_ERR(wwn))
+		return ERR_PTR(-EINVAL);
+
+	wwn->wwn_tf = tf;
+	config_group_init_type_name(&wwn->wwn_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_cit);
+
+	return &wwn->wwn_group;
+}
+
+static void target_fabric_drop_wwn(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct target_fabric_configfs *tf = container_of(group,
+				struct target_fabric_configfs, tf_group);
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_group_operations target_fabric_wwn_group_ops = {
+	.make_group	= target_fabric_make_wwn,
+	.drop_item	= target_fabric_drop_wwn,
+};
+/*
+ * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
+
+static struct configfs_item_operations target_fabric_wwn_item_ops = {
+	.show_attribute		= target_fabric_wwn_attr_show,
+	.store_attribute	= target_fabric_wwn_attr_store,
+};
+
+TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+
+/* End of tfc_wwn_cit */
+
+/* Start of tfc_discovery_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
+		tf_disc_group);
+
+static struct configfs_item_operations target_fabric_discovery_item_ops = {
+	.show_attribute		= target_fabric_discovery_attr_show,
+	.store_attribute	= target_fabric_discovery_attr_store,
+};
+
+TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+
+/* End of tfc_discovery_cit */
+
+int target_fabric_setup_cits(struct target_fabric_configfs *tf)
+{
+	target_fabric_setup_discovery_cit(tf);
+	target_fabric_setup_wwn_cit(tf);
+	target_fabric_setup_tpg_cit(tf);
+	target_fabric_setup_tpg_base_cit(tf);
+	target_fabric_setup_tpg_port_cit(tf);
+	target_fabric_setup_tpg_lun_cit(tf);
+	target_fabric_setup_tpg_np_cit(tf);
+	target_fabric_setup_tpg_np_base_cit(tf);
+	target_fabric_setup_tpg_attrib_cit(tf);
+	target_fabric_setup_tpg_param_cit(tf);
+	target_fabric_setup_tpg_nacl_cit(tf);
+	target_fabric_setup_tpg_nacl_base_cit(tf);
+	target_fabric_setup_tpg_nacl_attrib_cit(tf);
+	target_fabric_setup_tpg_nacl_auth_cit(tf);
+	target_fabric_setup_tpg_nacl_param_cit(tf);
+	target_fabric_setup_tpg_mappedlun_cit(tf);
+
+	return 0;
+}
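+
+/*
+ * Sketch (illustrative, iSCSI-style naming assumed) of the configfs
+ * hierarchy backed by the cits registered above:
+ *
+ *   /sys/kernel/config/target/$FABRIC/$WWN/tpgt_$TPGT/
+ *       lun/lun_$ID/                 (tpg_lun + tpg_port cits)
+ *       np/$NETWORK_PORTAL/          (tpg_np + tpg_np_base cits)
+ *       acls/$INITIATOR/             (tpg_nacl + tpg_nacl_base cits)
+ *           lun_$ID/                 (tpg_mappedlun cit)
+ *           attrib/ auth/ param/     (tpg_nacl_* cits)
+ *       attrib/ param/               (tpg_attrib + tpg_param cits)
+ */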
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 0000000..2628564
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,451 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_lib.c
+ *
+ * This file contains generic high level protocol identifier and PR
+ * handlers for TCM fabric modules
+ *
+ * Copyright (c) 2010 Rising Tide Systems, Inc.
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+/*
+ * Handlers for Serial Attached SCSI (SAS)
+ */
+u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * Return a SAS Serial SCSI Protocol identifier for loopback operations
+	 * This is defined in section 7.5.1 Table 362 in spc4r17
+	 */
+	return 0x6;
+}
+EXPORT_SYMBOL(sas_get_fabric_proto_ident);
+
+u32 sas_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char binary, *ptr;
+	int i;
+	u32 off = 4;
+	/*
+	 * Set PROTOCOL IDENTIFIER to 6h for SAS
+	 */
+	buf[0] = 0x06;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 */
+	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */
+
+	for (i = 0; i < 16; i += 2) {
+		binary = transport_asciihex_to_binaryhex(&ptr[i]);
+		buf[off++] = binary;
+	}
+	/*
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id);
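+
+/*
+ * Sketch (illustrative) of the 24-byte SAS TransportID built above,
+ * assuming an initiatorname of the form "naa.5001405xxxxxxxxx":
+ *
+ *   byte 0      : 0x06 (FORMAT CODE 00b, PROTOCOL IDENTIFIER 6h)
+ *   bytes 1-3   : reserved
+ *   bytes 4-11  : binary NAA identifier decoded from the 16 ASCII hex
+ *                 digits following the "naa." prefix
+ *   bytes 12-23 : reserved
+ */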
+
+u32 sas_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 *
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id_len);
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+char *sas_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+	 * for initiator ports using SCSI over SAS Serial SCSI Protocol
+	 *
+	 * The TransportID for a SAS Initiator Port is of fixed size of
+	 * 24 bytes, and SAS does not contain an I_T nexus identifier,
+	 * so we return the **port_nexus_ptr set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Fibre Channel Protocol (FCP)
+ */
+u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	return 0x0;	/* 0 = fcp-2 per SPC4 section 7.5.1 */
+}
+EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+
+u32 fc_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id_len);
+
+u32 fc_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char binary, *ptr;
+	int i;
+	u32 off = 8;
+	/*
+	 * PROTOCOL IDENTIFIER is 0h for FCP-2
+	 *
+	 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
+	 * SCSI over Fibre Channel
+	 *
+	 * We convert the ASCII formatted N Port name into a binary
+	 * encoded TransportID.
+	 */
+	ptr = &se_nacl->initiatorname[0];
+
+	for (i = 0; i < 24; ) {
+		if (!(strncmp(&ptr[i], ":", 1))) {
+			i++;
+			continue;
+		}
+		binary = transport_asciihex_to_binaryhex(&ptr[i]);
+		buf[off++] = binary;
+		i += 2;
+	}
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id);
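+
+/*
+ * Illustrative example: an ASCII N Port name such as
+ * "21:00:00:e0:8b:01:02:03" is packed above into eight binary bytes at
+ * offsets 8-15 of the 24-byte FCP TransportID, skipping the colons.
+ */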
+
+char *fc_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * The TransportID for a FC N Port is of fixed size of
+	 * 24 bytes, and FC does not contain an I_T nexus identifier,
+	 * so we return the **port_nexus_ptr set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	return (char *)&buf[8];
+}
+EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Internet Small Computer Systems Interface (iSCSI)
+ */
+
+u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * This value is defined for "Internet SCSI (iSCSI)"
+	 * in spc4r17 section 7.5.1 Table 362
+	 */
+	return 0x5;
+}
+EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
+
+u32 iscsi_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	u32 off = 4, padding = 0;
+	u16 len = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
+	 */
+	buf[0] = 0x05;
+	/*
+	 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+	 * ports using SCSI over iSCSI.
+	 *
+	 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
+	 * shall contain the iSCSI name of an iSCSI initiator node (see
+	 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
+	 * null character terminates the ISCSI NAME field without regard for
+	 * the specified length of the iSCSI TransportID or the contents of
+	 * the ADDITIONAL LENGTH field.
+	 */
+	len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+	/*
+	 * Add Extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is an ISID present with the registration, and
+	 * *format_code == 1, use the iSCSI Initiator port TransportID format.
+	 *
+	 * Otherwise use the iSCSI Initiator device TransportID format that
+	 * does not contain the ASCII encoded iSCSI Initiator ISID value
+	 * provided by the iSCSI Initiator during the iSCSI login process.
+	 */
+	if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
+		/*
+		 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
+		 * format.
+		 */
+		buf[0] |= 0x40;
+		/*
+		 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+		 * ports using SCSI over iSCSI.  Table 390
+		 *
+		 * The SEPARATOR field shall contain the five ASCII
+		 * characters ",i,0x".
+		 *
+		 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
+		 * field shall contain the iSCSI initiator session identifier
+		 * (see RFC 3720) in the form of ASCII characters that are the
+		 * hexadecimal digits converted from the binary iSCSI initiator
+		 * session identifier value. The first ISCSI INITIATOR SESSION
+		 * ID field byte containing an ASCII null character
+		 * terminates the ISCSI INITIATOR SESSION ID field.
+		 */
+		buf[off+len] = 0x2c; len++; /* ASCII Character: "," */
+		buf[off+len] = 0x69; len++; /* ASCII Character: "i" */
+		buf[off+len] = 0x2c; len++; /* ASCII Character: "," */
+		buf[off+len] = 0x30; len++; /* ASCII Character: "0" */
+		buf[off+len] = 0x78; len++; /* ASCII Character: "x" */
+		buf[off+len] = pr_reg->pr_reg_isid[0]; len++;
+		buf[off+len] = pr_reg->pr_reg_isid[1]; len++;
+		buf[off+len] = pr_reg->pr_reg_isid[2]; len++;
+		buf[off+len] = pr_reg->pr_reg_isid[3]; len++;
+		buf[off+len] = pr_reg->pr_reg_isid[4]; len++;
+		buf[off+len] = pr_reg->pr_reg_isid[5]; len++;
+		buf[off+len] = '\0'; len++;
+	}
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
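+	/*
+	 * Example (illustrative): len == 21 yields padding == 3, so the
+	 * padded length becomes 24, a multiple of four as required.
+	 */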
+
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff);
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id);
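+
+/*
+ * Sketch (illustrative) of the FORMAT CODE 01b TransportID produced
+ * above:
+ *
+ *   byte 0    : 0x45 (FORMAT CODE 01b, PROTOCOL IDENTIFIER 5h)
+ *   bytes 2-3 : ADDITIONAL LENGTH, big-endian, a multiple of four
+ *   bytes 4.. : "<initiatorname>,i,0x<isid>", NULL-terminated and
+ *               NULL-padded
+ */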
+
+u32 iscsi_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	u32 len = 0, padding = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	len = strlen(se_nacl->initiatorname);
+	/*
+	 * Add extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is ISID present with the registration, use format code:
+	 * 01b: iSCSI Initiator port TransportID format
+	 *
+	 * If there is not an active iSCSI session, use format code:
+	 * 00b: iSCSI Initiator device TransportID format
+	 */
+	if (pr_reg->isid_present_at_reg) {
+		len += 5; /* For ",i,0x" ASCII separator */
+		len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+		*format_code = 1;
+	} else
+		*format_code = 0;
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
+
+char *iscsi_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	char *p;
+	u32 tid_len, padding;
+	int i;
+	u16 add_len;
+	u8 format_code = (buf[0] & 0xc0);
+	/*
+	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+	 *
+	 *       TransportID for initiator ports using SCSI over iSCSI,
+	 *       from Table 388 -- iSCSI TransportID formats.
+	 *
+	 *    00b     Initiator port is identified using the world wide unique
+	 *            SCSI device name of the iSCSI initiator
+	 *            device containing the initiator port (see table 389).
+	 *    01b     Initiator port is identified using the world wide unique
+	 *            initiator port identifier (see table 390).
+	 *    10b-11b Reserved
+	 */
+	if ((format_code != 0x00) && (format_code != 0x40)) {
+		printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+			" Initiator Transport ID\n", format_code);
+		return NULL;
+	}
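+	/*
+	 * Illustrative example (name assumed): a FORMAT CODE 01b buffer
+	 * carrying "iqn.2003-01.org.linux-iscsi:init,i,0x123456789abc" is
+	 * split below into the iSCSI name, NULL-terminated in place, with
+	 * *port_nexus_ptr left pointing at the lowercased ISID characters.
+	 */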
+	/*
+	 * If the caller wants the TransportID Length, we set that value for the
+	 * entire iSCSI Transport ID now.
+	 */
+	if (out_tid_len != NULL) {
+		add_len = ((buf[2] & 0xff) << 8);
+		add_len |= (buf[3] & 0xff);
+
+		tid_len = strlen((char *)&buf[4]);
+		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+		tid_len += 1; /* Add one byte for NULL terminator */
+		padding = ((-tid_len) & 3);
+		if (padding != 0)
+			tid_len += padding;
+
+		if ((add_len + 4) != tid_len) {
+			printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+				"does not match calculated tid_len: %u,"
+				" using tid_len instead\n", add_len+4, tid_len);
+			*out_tid_len = tid_len;
+		} else
+			*out_tid_len = (add_len + 4);
+	}
+	/*
+	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+	 * format.
+	 */
+	if (format_code == 0x40) {
+		p = strstr((char *)&buf[4], ",i,0x");
+		if (!(p)) {
+			printk(KERN_ERR "Unable to locate \",i,0x\" separator"
+				" for Initiator port identifier: %s\n",
+				(char *)&buf[4]);
+			return NULL;
+		}
+		*p = '\0'; /* Terminate iSCSI Name */
+		p += 5; /* Skip over ",i,0x" separator */
+
+		*port_nexus_ptr = p;
+		/*
+		 * Go ahead and do the lower case conversion of the received
+		 * 12 ASCII characters representing the ISID in the TransportID
+		 * for comparison against the running iSCSI session's ISID from
+		 * iscsi_target.c:lio_sess_get_initiator_sid()
+		 */
+		for (i = 0; i < 12; i++) {
+			if (isdigit(*p)) {
+				p++;
+				continue;
+			}
+			*p = tolower(*p);
+			p++;
+		}
+	}
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 0000000..0aaca88
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,688 @@
+/*******************************************************************************
+ * Filename:  target_core_file.c
+ *
+ * This file contains the Storage Engine <-> FILEIO transport specific functions
+ *
+ * Copyright (c) 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_file.h"
+
+#if 1
+#define DEBUG_FD_CACHE(x...) printk(x)
+#else
+#define DEBUG_FD_CACHE(x...)
+#endif
+
+#if 1
+#define DEBUG_FD_FUA(x...) printk(x)
+#else
+#define DEBUG_FD_FUA(x...)
+#endif
+
+static struct se_subsystem_api fileio_template;
+
+/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct fd_host *fd_host;
+
+	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+	if (!(fd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
+		return -ENOMEM;
+	}
+
+	fd_host->fd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) fd_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+		TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+		" Target Core with TCQ Depth: %d MaxSectors: %u\n",
+		hba->hba_id, fd_host->fd_host_id,
+		atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+	kfree(fd_host);
+	hba->hba_ptr = NULL;
+}
+
+static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct fd_dev *fd_dev;
+	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+
+	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+	if (!(fd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+		return NULL;
+	}
+
+	fd_dev->fd_host = fd_host;
+
+	printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);
+
+	return fd_dev;
+}
+
+/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static struct se_device *fd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	char *dev_p = NULL;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct queue_limits *limits;
+	struct fd_dev *fd_dev = (struct fd_dev *) p;
+	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+	mm_segment_t old_fs;
+	struct file *file;
+	struct inode *inode = NULL;
+	int dev_flags = 0, flags;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	dev_p = getname(fd_dev->fd_dev_name);
+	set_fs(old_fs);
+
+	if (IS_ERR(dev_p)) {
+		printk(KERN_ERR "getname(%s) failed: %ld\n",
+			fd_dev->fd_dev_name, PTR_ERR(dev_p));
+		goto fail;
+	}
+#if 0
+	if (di->no_create_file)
+		flags = O_RDWR | O_LARGEFILE;
+	else
+		flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#else
+	flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#endif
+/*	flags |= O_DIRECT; */
+	/*
+	 * If fd_buffered_io=1 has not been set explicitly (the default),
+	 * use O_SYNC to force FILEIO writes to disk.
+	 */
+	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
+		flags |= O_SYNC;
+
+	file = filp_open(dev_p, flags, 0600);
+
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+		goto fail;
+	}
+	fd_dev->fd_file = file;
+	/*
+	 * If using a block backend with this struct file, we extract
+	 * fd_dev->fd_[block,dev]_size from struct block_device.
+	 *
+	 * Otherwise, we use the passed fd_size= from configfs
+	 */
+	inode = file->f_mapping->host;
+	if (S_ISBLK(inode->i_mode)) {
+		struct request_queue *q;
+		/*
+		 * Setup the local scope queue_limits from struct request_queue->limits
+		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+		 */
+		q = bdev_get_queue(inode->i_bdev);
+		limits = &dev_limits.limits;
+		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
+		limits->max_hw_sectors = queue_max_hw_sectors(q);
+		limits->max_sectors = queue_max_sectors(q);
+		/*
+		 * Determine the number of bytes from i_size_read() minus
+		 * one (1) logical sector from underlying struct block_device
+		 */
+		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+				       fd_dev->fd_block_size);
+
+		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+			" block_device blocks: %llu logical_block_size: %d\n",
+			fd_dev->fd_dev_size,
+			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+			fd_dev->fd_block_size);
+	} else {
+		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+				" parameter, and no backing struct"
+				" block_device\n");
+			goto fail;
+		}
+
+		limits = &dev_limits.limits;
+		limits->logical_block_size = FD_BLOCKSIZE;
+		limits->max_hw_sectors = FD_MAX_SECTORS;
+		limits->max_sectors = FD_MAX_SECTORS;
+		fd_dev->fd_block_size = FD_BLOCKSIZE;
+	}
+
+	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba, &fileio_template,
+				se_dev, dev_flags, (void *)fd_dev,
+				&dev_limits, "FILEIO", FD_VERSION);
+	if (!(dev))
+		goto fail;
+
+	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+	fd_dev->fd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
+			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+
+	putname(dev_p);
+	return dev;
+fail:
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+	putname(dev_p);
+	return NULL;
+}
+
+/*	fd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_device(void *p)
+{
+	struct fd_dev *fd_dev = (struct fd_dev *) p;
+
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+
+	kfree(fd_dev);
+}
+
+static inline struct fd_request *FILE_REQ(struct se_task *task)
+{
+	return container_of(task, struct fd_request, fd_task);
+}
+
+
+static struct se_task *
+fd_alloc_task(struct se_cmd *cmd)
+{
+	struct fd_request *fd_req;
+
+	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
+	if (!(fd_req)) {
+		printk(KERN_ERR "Unable to allocate struct fd_request\n");
+		return NULL;
+	}
+
+	fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
+
+	return &fd_req->fd_task;
+}
+
+static int fd_do_readv(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct file *fd = req->fd_dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	int ret = 0, i;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+	if (!(iov)) {
+		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
+		return -1;
+	}
+
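+	/* Map each scatterlist segment onto a kernel iovec for vfs_readv() */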
+	for (i = 0; i < task->task_sg_num; i++) {
+		iov[i].iov_len = sg[i].length;
+		iov[i].iov_base = sg_virt(&sg[i]);
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+	set_fs(old_fs);
+
+	kfree(iov);
+	/*
+	 * Return zeros and GOOD status even if the READ did not return
+	 * the expected virt_size for struct file w/o a backing struct
+	 * block_device.
+	 */
+	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+		if (ret < 0 || ret != task->task_size) {
+			printk(KERN_ERR "vfs_readv() returned %d,"
+				" expecting %d for S_ISBLK\n", ret,
+				(int)task->task_size);
+			return -1;
+		}
+	} else {
+		if (ret < 0) {
+			printk(KERN_ERR "vfs_readv() returned %d for non"
+				" S_ISBLK\n", ret);
+			return -1;
+		}
+	}
+
+	return 1;
+}
+
+static int fd_do_writev(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct file *fd = req->fd_dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	int ret, i = 0;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+	if (!(iov)) {
+		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
+		return -1;
+	}
+
+	for (i = 0; i < task->task_sg_num; i++) {
+		iov[i].iov_len = sg[i].length;
+		iov[i].iov_base = sg_virt(&sg[i]);
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+	set_fs(old_fs);
+
+	kfree(iov);
+
+	if (ret < 0 || ret != task->task_size) {
+		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
+		return -1;
+	}
+
+	return 1;
+}
+
+static void fd_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+	loff_t start, end;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
+	/*
+	 * Determine if we will be flushing the entire device.
+	 */
+	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+		start = 0;
+		end = LLONG_MAX;
+	} else {
+		start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+		if (cmd->data_length)
+			end = start + cmd->data_length;
+		else
+			end = LLONG_MAX;
+	}
+
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+
+	if (!immed)
+		transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int fd_emulated_write_cache(struct se_device *dev)
+{
+	return 1;
+}
+
+static int fd_emulated_dpo(struct se_device *dev)
+{
+	return 0;
+}
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int fd_emulated_fua_write(struct se_device *dev)
+{
+	return 1;
+}
+
+static int fd_emulated_fua_read(struct se_device *dev)
+{
+	return 0;
+}
+
+/*
+ * WRITE Force Unit Access (FUA) emulation on a per struct se_task
+ * LBA range basis..
+ */
+static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+	loff_t end = start + task->task_size;
+	int ret;
+
+	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+			task->task_lba, task->task_size);
+
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+}
+
+static int fd_do_task(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	/*
+	 * Call vectorized fileio functions to map struct scatterlist
+	 * physical memory addresses to struct iovec virtual memory.
+	 */
+	if (task->task_data_direction == DMA_FROM_DEVICE) {
+		ret = fd_do_readv(task);
+	} else {
+		ret = fd_do_writev(task);
+
+		if (ret > 0 &&
+		    DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
+		    DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+		    T_TASK(cmd)->t_tasks_fua) {
+			/*
+			 * We might need to be a bit smarter here
+			 * and return some sense data to let the initiator
+			 * know the FUA WRITE cache sync failed..?
+			 */
+			fd_emulate_write_fua(cmd, task);
+		}
+
+	}
+
+	if (ret < 0)
+		return ret;
+	if (ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	fd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_task(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+
+	kfree(req);
+}
+
+enum {
+	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_fd_dev_name, "fd_dev_name=%s"},
+	{Opt_fd_dev_size, "fd_dev_size=%s"},
+	{Opt_fd_buffered_io, "fd_buffered_id=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t fd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page, ssize_t count)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
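+	/* Parse the comma-separated key=value options written via configfs */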
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_fd_dev_name:
+			arg_p = match_strdup(&args[0]);
+			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
+					"%s", arg_p);
+			kfree(arg_p);
+			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+					fd_dev->fd_dev_name);
+			fd_dev->fbd_flags |= FBDF_HAS_PATH;
+			break;
+		case Opt_fd_dev_size:
+			arg_p = match_strdup(&args[0]);
+			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+			kfree(arg_p);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoull() failed for"
+						" fd_dev_size=\n");
+				goto out;
+			}
+			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+					" bytes\n", fd_dev->fd_dev_size);
+			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+			break;
+		case Opt_fd_buffered_io:
+			match_int(args, &arg);
+			if (arg != 1) {
+				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			printk(KERN_INFO "FILEIO: Using buffered I/O"
+				" operations for struct fd_dev\n");
+
+			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		printk(KERN_ERR "Missing fd_dev_name=\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t fd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = 0;
+
+	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
+		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
+		"Buffered" : "Synchronous");
+	return bl;
+}
+
+/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *fd_get_cdb(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+
+	return req->fd_scsi_cdb;
+}
+
+/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t fd_get_blocks(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
+			DEV_ATTRIB(dev)->block_size);
+
+	return blocks_long;
+}
+
+static struct se_subsystem_api fileio_template = {
+	.name			= "fileio",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.attach_hba		= fd_attach_hba,
+	.detach_hba		= fd_detach_hba,
+	.allocate_virtdevice	= fd_allocate_virtdevice,
+	.create_virtdevice	= fd_create_virtdevice,
+	.free_device		= fd_free_device,
+	.dpo_emulated		= fd_emulated_dpo,
+	.fua_write_emulated	= fd_emulated_fua_write,
+	.fua_read_emulated	= fd_emulated_fua_read,
+	.write_cache_emulated	= fd_emulated_write_cache,
+	.alloc_task		= fd_alloc_task,
+	.do_task		= fd_do_task,
+	.do_sync_cache		= fd_emulate_sync_cache,
+	.free_task		= fd_free_task,
+	.check_configfs_dev_params = fd_check_configfs_dev_params,
+	.set_configfs_dev_params = fd_set_configfs_dev_params,
+	.show_configfs_dev_params = fd_show_configfs_dev_params,
+	.get_cdb		= fd_get_cdb,
+	.get_device_rev		= fd_get_device_rev,
+	.get_device_type	= fd_get_device_type,
+	.get_blocks		= fd_get_blocks,
+};
+
+static int __init fileio_module_init(void)
+{
+	return transport_subsystem_register(&fileio_template);
+}
+
+static void fileio_module_exit(void)
+{
+	transport_subsystem_release(&fileio_template);
+}
+
+MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(fileio_module_init);
+module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 0000000..ef4de2b
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,50 @@
+#ifndef TARGET_CORE_FILE_H
+#define TARGET_CORE_FILE_H
+
+#define FD_VERSION		"4.0"
+
+#define FD_MAX_DEV_NAME		256
+/* Maximum queuedepth for the FILEIO HBA */
+#define FD_HBA_QUEUE_DEPTH	256
+#define FD_DEVICE_QUEUE_DEPTH	32
+#define FD_MAX_DEVICE_QUEUE_DEPTH 128
+#define FD_BLOCKSIZE		512
+#define FD_MAX_SECTORS		1024
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct fd_request {
+	struct se_task	fd_task;
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	/* FILEIO device */
+	struct fd_dev	*fd_dev;
+} ____cacheline_aligned;
+
+#define FBDF_HAS_PATH		0x01
+#define FBDF_HAS_SIZE		0x02
+#define FDBD_USE_BUFFERED_IO	0x04
+
+struct fd_dev {
+	u32		fbd_flags;
+	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
+	/* Unique FILEIO Device ID in FILEIO HBA */
+	u32		fd_dev_id;
+	/* Number of SG tables in sg_table_array */
+	u32		fd_table_count;
+	u32		fd_queue_depth;
+	u32		fd_block_size;
+	unsigned long long fd_dev_size;
+	struct file	*fd_file;
+	/* FILEIO HBA device is connected to */
+	struct fd_host *fd_host;
+} ____cacheline_aligned;
+
+struct fd_host {
+	u32		fd_host_dev_id_count;
+	/* Unique FILEIO Host ID */
+	u32		fd_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 0000000..4bbe820
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,189 @@
+/*******************************************************************************
+ * Filename:  target_core_hba.c
+ *
+ * This file contains the iSCSI HBA Transport related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_hba.h"
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(subsystem_mutex);
+
+int transport_subsystem_register(struct se_subsystem_api *sub_api)
+{
+	struct se_subsystem_api *s;
+
+	INIT_LIST_HEAD(&sub_api->sub_api_list);
+
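+	/* Reject duplicate backend names while holding subsystem_mutex */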
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!(strcmp(s->name, sub_api->name))) {
+			printk(KERN_ERR "%p is already registered with"
+				" duplicate name %s, unable to process"
+				" request\n", s, s->name);
+			mutex_unlock(&subsystem_mutex);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&sub_api->sub_api_list, &subsystem_list);
+	mutex_unlock(&subsystem_mutex);
+
+	printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+			" %p\n", sub_api->name, sub_api->owner);
+	return 0;
+}
+EXPORT_SYMBOL(transport_subsystem_register);
+
+void transport_subsystem_release(struct se_subsystem_api *sub_api)
+{
+	mutex_lock(&subsystem_mutex);
+	list_del(&sub_api->sub_api_list);
+	mutex_unlock(&subsystem_mutex);
+}
+EXPORT_SYMBOL(transport_subsystem_release);
+
+static struct se_subsystem_api *core_get_backend(const char *sub_name)
+{
+	struct se_subsystem_api *s;
+
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!strcmp(s->name, sub_name))
+			goto found;
+	}
+	mutex_unlock(&subsystem_mutex);
+	return NULL;
+found:
+	if (s->owner && !try_module_get(s->owner))
+		s = NULL;
+	mutex_unlock(&subsystem_mutex);
+	return s;
+}
+
+struct se_hba *
+core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
+{
+	struct se_hba *hba;
+	int ret = 0;
+
+	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+	if (!hba) {
+		printk(KERN_ERR "Unable to allocate struct se_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_LIST_HEAD(&hba->hba_dev_list);
+	spin_lock_init(&hba->device_lock);
+	spin_lock_init(&hba->hba_queue_lock);
+	mutex_init(&hba->hba_access_mutex);
+
+	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
+	hba->hba_flags |= hba_flags;
+
+	atomic_set(&hba->max_queue_depth, 0);
+	atomic_set(&hba->left_queue_depth, 0);
+
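+	/*
+	 * core_get_backend() takes a reference on the backend module,
+	 * which is dropped again in core_delete_hba().
+	 */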
+	hba->transport = core_get_backend(plugin_name);
+	if (!hba->transport) {
+		ret = -EINVAL;
+		goto out_free_hba;
+	}
+
+	ret = hba->transport->attach_hba(hba, plugin_dep_id);
+	if (ret < 0)
+		goto out_module_put;
+
+	spin_lock(&se_global->hba_lock);
+	hba->hba_id = se_global->g_hba_id_counter++;
+	list_add_tail(&hba->hba_list, &se_global->g_hba_list);
+	spin_unlock(&se_global->hba_lock);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+			" Core\n", hba->hba_id);
+
+	return hba;
+
+out_module_put:
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+	hba->transport = NULL;
+out_free_hba:
+	kfree(hba);
+	return ERR_PTR(ret);
+}
+
+int
+core_delete_hba(struct se_hba *hba)
+{
+	struct se_device *dev, *dev_tmp;
+
+	spin_lock(&hba->device_lock);
+	list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
+
+		se_clear_dev_ports(dev);
+		spin_unlock(&hba->device_lock);
+
+		se_release_device_for_hba(dev);
+
+		spin_lock(&hba->device_lock);
+	}
+	spin_unlock(&hba->device_lock);
+
+	hba->transport->detach_hba(hba);
+
+	spin_lock(&se_global->hba_lock);
+	list_del(&hba->hba_list);
+	spin_unlock(&se_global->hba_lock);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+			" Core\n", hba->hba_id);
+
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+
+	hba->transport = NULL;
+	kfree(hba);
+	return 0;
+}
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
new file mode 100644
index 0000000..bb0fea5
--- /dev/null
+++ b/drivers/target/target_core_hba.h
@@ -0,0 +1,7 @@
+#ifndef TARGET_CORE_HBA_H
+#define TARGET_CORE_HBA_H
+
+extern struct se_hba *core_alloc_hba(const char *, u32, u32);
+extern int core_delete_hba(struct se_hba *);
+
+#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 0000000..c6e0d75
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,824 @@
+/*******************************************************************************
+ * Filename:  target_core_iblock.c
+ *
+ * This file contains the Storage Engine <-> Linux BlockIO transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_iblock.h"
+
+#if 0
+#define DEBUG_IBLOCK(x...) printk(x)
+#else
+#define DEBUG_IBLOCK(x...)
+#endif
+
+static struct se_subsystem_api iblock_template;
+
+static void iblock_bio_done(struct bio *, int);
+
+/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct iblock_hba *ib_host;
+
+	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
+	if (!(ib_host)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct iblock_hba\n");
+		return -ENOMEM;
+	}
+
+	ib_host->iblock_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) ib_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
+		" Target Core TCQ Depth: %d\n", hba->hba_id,
+		ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+
+	return 0;
+}
+
+static void iblock_detach_hba(struct se_hba *hba)
+{
+	struct iblock_hba *ib_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
+
+	kfree(ib_host);
+	hba->hba_ptr = NULL;
+}
+
+static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct iblock_dev *ib_dev = NULL;
+	struct iblock_hba *ib_host = hba->hba_ptr;
+
+	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
+	if (!(ib_dev)) {
+		printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+		return NULL;
+	}
+	ib_dev->ibd_host = ib_host;
+
+	printk(KERN_INFO  "IBLOCK: Allocated ib_dev for %s\n", name);
+
+	return ib_dev;
+}
+
+static struct se_device *iblock_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct iblock_dev *ib_dev = p;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct block_device *bd = NULL;
+	struct request_queue *q;
+	struct queue_limits *limits;
+	u32 dev_flags = 0;
+
+	if (!(ib_dev)) {
+		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+		return NULL;
+	}
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	/*
+	 * These bioset settings (pool size 32, front_pad 64) need to
+	 * be made tunable.
+	 */
+	ib_dev->ibd_bio_set = bioset_create(32, 64);
+	if (!(ib_dev->ibd_bio_set)) {
+		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+		return NULL;
+	}
+	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+	/*
+	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
+	 * has already been set before echo 1 > $HBA/$DEV/enable can run.
+	 */
+	printk(KERN_INFO  "IBLOCK: Claiming struct block_device: %s\n",
+			ib_dev->ibd_udev_path);
+
+	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+	if (IS_ERR(bd))
+		goto failed;
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
+	q = bdev_get_queue(bd);
+	limits = &dev_limits.limits;
+	limits->logical_block_size = bdev_logical_block_size(bd);
+	limits->max_hw_sectors = queue_max_hw_sectors(q);
+	limits->max_sectors = queue_max_sectors(q);
+	dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+
+	ib_dev->ibd_major = MAJOR(bd->bd_dev);
+	ib_dev->ibd_minor = MINOR(bd->bd_dev);
+	ib_dev->ibd_bd = bd;
+
+	dev = transport_add_device_to_core_hba(hba,
+			&iblock_template, se_dev, dev_flags, (void *)ib_dev,
+			&dev_limits, "IBLOCK", IBLOCK_VERSION);
+	if (!(dev))
+		goto failed;
+
+	ib_dev->ibd_depth = dev->queue_depth;
+
+	/*
+	 * Check if the underlying struct block_device request_queue supports
+	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+	 * in ATA and we need to set TPE=1
+	 */
+	if (blk_queue_discard(bdev_get_queue(bd))) {
+		struct request_queue *q = bdev_get_queue(bd);
+
+		DEV_ATTRIB(dev)->max_unmap_lba_count =
+				q->limits.max_discard_sectors;
+		/*
+		 * Currently hardcoded to 1 in Linux/SCSI code..
+		 */
+		DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
+		DEV_ATTRIB(dev)->unmap_granularity =
+				q->limits.discard_granularity;
+		DEV_ATTRIB(dev)->unmap_granularity_alignment =
+				q->limits.discard_alignment;
+
+		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+				" disabled by default\n");
+	}
+
+	return dev;
+
+failed:
+	if (ib_dev->ibd_bio_set) {
+		bioset_free(ib_dev->ibd_bio_set);
+		ib_dev->ibd_bio_set = NULL;
+	}
+	ib_dev->ibd_bd = NULL;
+	ib_dev->ibd_major = 0;
+	ib_dev->ibd_minor = 0;
+	return NULL;
+}
+
+static void iblock_free_device(void *p)
+{
+	struct iblock_dev *ib_dev = p;
+
+	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	bioset_free(ib_dev->ibd_bio_set);
+	kfree(ib_dev);
+}
+
+static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
+{
+	return container_of(task, struct iblock_req, ib_task);
+}
+
+static struct se_task *
+iblock_alloc_task(struct se_cmd *cmd)
+{
+	struct iblock_req *ib_req;
+
+	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!(ib_req)) {
+		printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+		return NULL;
+	}
+
+	ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
+	atomic_set(&ib_req->ib_bio_cnt, 0);
+	return &ib_req->ib_task;
+}
+
+static unsigned long long iblock_emulate_read_cap_with_block_size(
+	struct se_device *dev,
+	struct block_device *bd,
+	struct request_queue *q)
+{
+	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
+					bdev_logical_block_size(bd)) - 1);
+	u32 block_size = bdev_logical_block_size(bd);
+
+	if (block_size == DEV_ATTRIB(dev)->block_size)
+		return blocks_long;
+
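+	/*
+	 * Rescale the block count between the backing device's logical
+	 * block size and the exported SCSI block size; both are powers
+	 * of two, so the conversion reduces to shifts.
+	 */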
+	switch (block_size) {
+	case 4096:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 2048:
+			blocks_long <<= 1;
+			break;
+		case 1024:
+			blocks_long <<= 2;
+			break;
+		case 512:
+			blocks_long <<= 3;
+		default:
+			break;
+		}
+		break;
+	case 2048:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 1;
+			break;
+		case 1024:
+			blocks_long <<= 1;
+			break;
+		case 512:
+			blocks_long <<= 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 1024:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 2;
+			break;
+		case 2048:
+			blocks_long >>= 1;
+			break;
+		case 512:
+			blocks_long <<= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 512:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 3;
+			break;
+		case 2048:
+			blocks_long >>= 2;
+			break;
+		case 1024:
+			blocks_long >>= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return blocks_long;
+}
+
+/*
+ * Emulate SYNCHRONIZE_CACHE_*
+ */
+static void iblock_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+	sector_t error_sector;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
+	/*
+	 * blkdev_issue_flush() does not support specifying a range, so
+	 * we have to flush the entire cache.
+	 */
+	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
+	if (ret != 0) {
+		printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
+			" error_sector: %llu\n", ret,
+			(unsigned long long)error_sector);
+	}
+
+	if (!immed)
+		transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int iblock_emulated_write_cache(struct se_device *dev)
+{
+	return 1;
+}
+
+static int iblock_emulated_dpo(struct se_device *dev)
+{
+	return 0;
+}
+
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int iblock_emulated_fua_write(struct se_device *dev)
+{
+	return 1;
+}
+
+static int iblock_emulated_fua_read(struct se_device *dev)
+{
+	return 0;
+}
+
+static int iblock_do_task(struct se_task *task)
+{
+	struct se_device *dev = task->task_se_cmd->se_dev;
+	struct iblock_req *req = IBLOCK_REQ(task);
+	struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
+	struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
+	struct bio *bio = req->ib_bio, *nbio = NULL;
+	int rw;
+
+	if (task->task_data_direction == DMA_TO_DEVICE) {
+		/*
+		 * Force data to disk if we pretend to not have a volatile
+		 * write cache, or the initiator set the Force Unit Access bit.
+		 */
+		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+		     T_TASK(task->task_se_cmd)->t_tasks_fua))
+			rw = WRITE_FUA;
+		else
+			rw = WRITE;
+	} else {
+		rw = READ;
+	}
+
+	while (bio) {
+		nbio = bio->bi_next;
+		bio->bi_next = NULL;
+		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
+			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+
+		submit_bio(rw, bio);
+		bio = nbio;
+	}
+
+	if (q->unplug_fn)
+		q->unplug_fn(q);
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	int barrier = 0;
+
+	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+}
+
+static void iblock_free_task(struct se_task *task)
+{
+	struct iblock_req *req = IBLOCK_REQ(task);
+	struct bio *bio, *hbio = req->ib_bio;
+	/*
+	 * We only release the bio(s) here if iblock_bio_done() has not called
+	 * bio_put() -> iblock_bio_destructor().
+	 */
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_put(bio);
+	}
+
+	kfree(req);
+}
+
+enum {
+	Opt_udev_path, Opt_force, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_udev_path, "udev_path=%s"},
+	{Opt_force, "force=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
+					       struct se_subsystem_dev *se_dev,
+					       const char *page, ssize_t count)
+{
+	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_udev_path:
+			if (ib_dev->ibd_bd) {
+				printk(KERN_ERR "Unable to set udev_path= while"
+					" ib_dev->ibd_bd exists\n");
+				ret = -EEXIST;
+				goto out;
+			}
+
+			arg_p = match_strdup(&args[0]);
+			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+				"%s", arg_p);
+			kfree(arg_p);
+			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+					ib_dev->ibd_udev_path);
+			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+			break;
+		case Opt_force:
+			match_int(args, &arg);
+			ib_dev->ibd_force = arg;
+			printk(KERN_INFO "IBLOCK: Set force=%d\n",
+				ib_dev->ibd_force);
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t iblock_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+
+	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t iblock_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	char buf[BDEVNAME_SIZE];
+	ssize_t bl = 0;
+
+	if (bd)
+		bl += sprintf(b + bl, "iBlock device: %s",
+				bdevname(bd, buf));
+	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
+		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
+				ibd->ibd_udev_path);
+	} else
+		bl += sprintf(b + bl, "\n");
+
+	bl += sprintf(b + bl, "        ");
+	if (bd) {
+		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
+			ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+			"CLAIMED: IBLOCK" : "CLAIMED: OS");
+	} else {
+		bl += sprintf(b + bl, "Major: %d Minor: %d\n",
+			ibd->ibd_major, ibd->ibd_minor);
+	}
+
+	return bl;
+}
+
+static void iblock_bio_destructor(struct bio *bio)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+
+	bio_free(bio, ib_dev->ibd_bio_set);
+}
+
+static struct bio *iblock_get_bio(
+	struct se_task *task,
+	struct iblock_req *ib_req,
+	struct iblock_dev *ib_dev,
+	int *ret,
+	sector_t lba,
+	u32 sg_num)
+{
+	struct bio *bio;
+
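+	/*
+	 * Allocate from the per-device bioset so that completion can
+	 * return the bio via iblock_bio_destructor().
+	 */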
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!(bio)) {
+		printk(KERN_ERR "Unable to allocate memory for bio\n");
+		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+		return NULL;
+	}
+
+	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
+		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
+	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = (void *) task;
+	bio->bi_destructor = iblock_bio_destructor;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_sector = lba;
+	atomic_inc(&ib_req->ib_bio_cnt);
+
+	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
+	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+			atomic_read(&ib_req->ib_bio_cnt));
+	return bio;
+}
+
+static int iblock_map_task_SG(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = SE_DEV(cmd);
+	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+	struct iblock_req *ib_req = IBLOCK_REQ(task);
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct scatterlist *sg;
+	int ret = 0;
+	u32 i, sg_num = task->task_sg_num;
+	sector_t block_lba;
+	/*
+	 * Do starting conversion up from non 512-byte blocksize with
+	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+	 */
+	if (DEV_ATTRIB(dev)->block_size == 4096)
+		block_lba = (task->task_lba << 3);
+	else if (DEV_ATTRIB(dev)->block_size == 2048)
+		block_lba = (task->task_lba << 2);
+	else if (DEV_ATTRIB(dev)->block_size == 1024)
+		block_lba = (task->task_lba << 1);
+	else if (DEV_ATTRIB(dev)->block_size == 512)
+		block_lba = task->task_lba;
+	else {
+		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
+				" %u\n", DEV_ATTRIB(dev)->block_size);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+	if (!(bio))
+		return ret;
+
+	ib_req->ib_bio = bio;
+	hbio = tbio = bio;
+	/*
+	 * Use fs/bio.c:bio_add_page() to setup the bio_vec maplist
+	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+	 */
+	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
+		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
+				sg->length, sg->offset);
+again:
+		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
+		if (ret != sg->length) {
+
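+			/*
+			 * The current bio is full; allocate another one,
+			 * chain it, and retry adding this page.
+			 */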
+			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
+					bio->bi_sector);
+			DEBUG_IBLOCK("** task->task_size: %u\n",
+					task->task_size);
+			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+					bio->bi_max_vecs);
+			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+					bio->bi_vcnt);
+
+			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
+						block_lba, sg_num);
+			if (!(bio))
+				goto fail;
+
+			tbio = tbio->bi_next = bio;
+			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+				" list, Going to again\n", bio);
+			goto again;
+		}
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sg_num--;
+		DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
+			" sg_num to %u\n", task, sg_num);
+		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
+				" to %llu\n", task, block_lba);
+		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+				" %u\n", task, bio->bi_vcnt);
+	}
+
+	return 0;
+fail:
+	while (hbio) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_put(bio);
+	}
+	return ret;
+}
+
+static unsigned char *iblock_get_cdb(struct se_task *task)
+{
+	return IBLOCK_REQ(task)->ib_scsi_cdb;
+}
+
+static u32 iblock_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 iblock_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t iblock_get_blocks(struct se_device *dev)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_req *ibr = IBLOCK_REQ(task);
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+	 */
+	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+		err = -EIO;
+
+	if (err != 0) {
+		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic_inc();
+		bio_put(bio);
+		/*
+		 * Wait to complete the task until the last bio has completed.
+		 */
+		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+			return;
+
+		ibr->ib_bio = NULL;
+		transport_complete_task(task, 0);
+		return;
+	}
+	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+		task, bio, task->task_lba, bio->bi_sector, err);
+	/*
+	 * bio_put() will call iblock_bio_destructor() to release the bio back
+	 * to ibr->ib_bio_set.
+	 */
+	bio_put(bio);
+	/*
+	 * Wait to complete the task until the last bio has completed.
+	 */
+	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+		return;
+	/*
+	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
+	 */
+	ibr->ib_bio = NULL;
+	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+}
+
+static struct se_subsystem_api iblock_template = {
+	.name			= "iblock",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.map_task_SG		= iblock_map_task_SG,
+	.attach_hba		= iblock_attach_hba,
+	.detach_hba		= iblock_detach_hba,
+	.allocate_virtdevice	= iblock_allocate_virtdevice,
+	.create_virtdevice	= iblock_create_virtdevice,
+	.free_device		= iblock_free_device,
+	.dpo_emulated		= iblock_emulated_dpo,
+	.fua_write_emulated	= iblock_emulated_fua_write,
+	.fua_read_emulated	= iblock_emulated_fua_read,
+	.write_cache_emulated	= iblock_emulated_write_cache,
+	.alloc_task		= iblock_alloc_task,
+	.do_task		= iblock_do_task,
+	.do_discard		= iblock_do_discard,
+	.do_sync_cache		= iblock_emulate_sync_cache,
+	.free_task		= iblock_free_task,
+	.check_configfs_dev_params = iblock_check_configfs_dev_params,
+	.set_configfs_dev_params = iblock_set_configfs_dev_params,
+	.show_configfs_dev_params = iblock_show_configfs_dev_params,
+	.get_cdb		= iblock_get_cdb,
+	.get_device_rev		= iblock_get_device_rev,
+	.get_device_type	= iblock_get_device_type,
+	.get_blocks		= iblock_get_blocks,
+};
+
+static int __init iblock_module_init(void)
+{
+	return transport_subsystem_register(&iblock_template);
+}
+
+static void iblock_module_exit(void)
+{
+	transport_subsystem_release(&iblock_template);
+}
+
+MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iblock_module_init);
+module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 0000000..64c1f4d
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,40 @@
+#ifndef TARGET_CORE_IBLOCK_H
+#define TARGET_CORE_IBLOCK_H
+
+#define IBLOCK_VERSION		"4.0"
+
+#define IBLOCK_HBA_QUEUE_DEPTH	512
+#define IBLOCK_DEVICE_QUEUE_DEPTH	32
+#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH	128
+#define IBLOCK_MAX_CDBS		16
+#define IBLOCK_LBA_SHIFT	9
+
+struct iblock_req {
+	struct se_task ib_task;
+	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	atomic_t ib_bio_cnt;
+	atomic_t ib_bio_err_cnt;
+	struct bio *ib_bio;
+	struct iblock_dev *ib_dev;
+} ____cacheline_aligned;
+
+#define IBDF_HAS_UDEV_PATH		0x01
+#define IBDF_HAS_FORCE			0x02
+
+struct iblock_dev {
+	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
+	int	ibd_force;
+	int	ibd_major;
+	int	ibd_minor;
+	u32	ibd_depth;
+	u32	ibd_flags;
+	struct bio_set	*ibd_bio_set;
+	struct block_device *ibd_bd;
+	struct iblock_hba *ibd_host;
+} ____cacheline_aligned;
+
+struct iblock_hba {
+	int		iblock_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
new file mode 100644
index 0000000..d5a48aa
--- /dev/null
+++ b/drivers/target/target_core_mib.c
@@ -0,0 +1,1086 @@
+/*******************************************************************************
+ * Filename:  target_core_mib.c
+ *
+ * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_mib.h"
+
+/* SCSI mib table index */
+static struct scsi_index_table scsi_index_table;
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+/* SCSI Instance Table */
+#define SCSI_INST_SW_INDEX		1
+#define SCSI_TRANSPORT_INDEX		1
+
+#define NONE		"None"
+#define ISPRINT(a)   (((a) >= ' ') && ((a) <= '~'))
+
+static inline int list_is_first(const struct list_head *list,
+				const struct list_head *head)
+{
+	return list->prev == head;
+}
+
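+/*
+ * seq_file iterators over the global se_dev list; g_device_lock is
+ * held from ->start() until ->stop().
+ */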
+static void *locate_hba_start(
+	struct seq_file *seq,
+	loff_t *pos)
+{
+	spin_lock(&se_global->g_device_lock);
+	return seq_list_start(&se_global->g_se_dev_list, *pos);
+}
+
+static void *locate_hba_next(
+	struct seq_file *seq,
+	void *v,
+	loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_se_dev_list, pos);
+}
+
+static void locate_hba_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock(&se_global->g_device_lock);
+}
+
+/****************************************************************************
+ * SCSI MIB Tables
+ ****************************************************************************/
+
+/*
+ * SCSI Instance Table
+ */
+static void *scsi_inst_seq_start(
+	struct seq_file *seq,
+	loff_t *pos)
+{
+	spin_lock(&se_global->hba_lock);
+	return seq_list_start(&se_global->g_hba_list, *pos);
+}
+
+static void *scsi_inst_seq_next(
+	struct seq_file *seq,
+	void *v,
+	loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_hba_list, pos);
+}
+
+static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock(&se_global->hba_lock);
+}
+
+static int scsi_inst_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
+
+	if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
+		seq_puts(seq, "inst sw_indx\n");
+
+	seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
+	seq_printf(seq, "plugin: %s version: %s\n",
+			hba->transport->name, TARGET_CORE_VERSION);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_inst_seq_ops = {
+	.start	= scsi_inst_seq_start,
+	.next	= scsi_inst_seq_next,
+	.stop	= scsi_inst_seq_stop,
+	.show	= scsi_inst_seq_show
+};
+
+static int scsi_inst_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_inst_seq_ops);
+}
+
+static const struct file_operations scsi_inst_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_inst_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Device Table
+ */
+static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_dev_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	char str[28];
+	int k;
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst indx role ports\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
+		   dev->dev_index, "Target", dev->dev_port_count);
+
+	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+
+	/* vendor */
+	for (k = 0; k < 8; k++)
+		str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
+				DEV_T10_WWN(dev)->vendor[k] : 0x20;
+	str[k] = 0x20;
+
+	/* model */
+	for (k = 0; k < 16; k++)
+		str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
+				DEV_T10_WWN(dev)->model[k] : 0x20;
+	str[k + 9] = 0;
+
+	seq_printf(seq, "dev_alias: %s\n", str);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_dev_seq_ops = {
+	.start  = scsi_dev_seq_start,
+	.next   = scsi_dev_seq_next,
+	.stop   = scsi_dev_seq_stop,
+	.show   = scsi_dev_seq_show
+};
+
+static int scsi_dev_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_dev_seq_ops);
+}
+
+static const struct file_operations scsi_dev_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_dev_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Port Table
+ */
+static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_port_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_port_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	struct se_port *sep, *sep_tmp;
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst device indx role busy_count\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	/* FIXME: scsiPortBusyStatuses count */
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+		seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
+			dev->dev_index, sep->sep_index, "Device",
+			dev->dev_index, 0);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_port_seq_ops = {
+	.start  = scsi_port_seq_start,
+	.next   = scsi_port_seq_next,
+	.stop   = scsi_port_seq_stop,
+	.show   = scsi_port_seq_show
+};
+
+static int scsi_port_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_port_seq_ops);
+}
+
+static const struct file_operations scsi_port_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_port_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Transport Table
+ */
+static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_transport_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	struct se_port *se, *se_tmp;
+	struct se_portal_group *tpg;
+	struct t10_wwn *wwn;
+	char buf[64];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst device indx dev_name\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	wwn = DEV_T10_WWN(dev);
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
+		tpg = se->sep_tpg;
+		sprintf(buf, "scsiTransport%s",
+				TPG_TFO(tpg)->get_fabric_name());
+
+		seq_printf(seq, "%u %s %u %s+%s\n",
+			hba->hba_index, /* scsiTransportIndex */
+			buf,  /* scsiTransportType */
+			(TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
+			TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
+			0,
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			(strlen(wwn->unit_serial)) ?
+			/* scsiTransportDevName */
+			wwn->unit_serial : wwn->vendor);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_transport_seq_ops = {
+	.start  = scsi_transport_seq_start,
+	.next   = scsi_transport_seq_next,
+	.stop   = scsi_transport_seq_stop,
+	.show   = scsi_transport_seq_show
+};
+
+static int scsi_transport_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_transport_seq_ops);
+}
+
+static const struct file_operations scsi_transport_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_transport_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Target Device Table
+ */
+static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+
+#define LU_COUNT	1  /* for now */
+static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	int non_accessible_lus = 0;
+	char status[16];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst indx num_LUs status non_access_LUs"
+			" resets\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		strcpy(status, "activated");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		strcpy(status, "deactivated");
+		non_accessible_lus = 1;
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		strcpy(status, "shutdown");
+		non_accessible_lus = 1;
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		strcpy(status, "offline");
+		non_accessible_lus = 1;
+		break;
+	default:
+		sprintf(status, "unknown(%d)", dev->dev_status);
+		non_accessible_lus = 1;
+	}
+
+	seq_printf(seq, "%u %u %u %s %u %u\n",
+		   hba->hba_index, dev->dev_index, LU_COUNT,
+		   status, non_accessible_lus, dev->num_resets);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_tgt_dev_seq_ops = {
+	.start  = scsi_tgt_dev_seq_start,
+	.next   = scsi_tgt_dev_seq_next,
+	.stop   = scsi_tgt_dev_seq_stop,
+	.show   = scsi_tgt_dev_seq_show
+};
+
+static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_tgt_dev_seq_ops);
+}
+
+static const struct file_operations scsi_tgt_dev_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_tgt_dev_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Target Port Table
+ */
+static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	struct se_port *sep, *sep_tmp;
+	struct se_portal_group *tpg;
+	u32 rx_mbytes, tx_mbytes;
+	unsigned long long num_cmds;
+	char buf[64];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst device indx name port_index in_cmds"
+			" write_mbytes read_mbytes hs_in_cmds\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+		tpg = sep->sep_tpg;
+		sprintf(buf, "%sPort#",
+			TPG_TFO(tpg)->get_fabric_name());
+
+		seq_printf(seq, "%u %u %u %s%d %s%s%d ",
+		     hba->hba_index,
+		     dev->dev_index,
+		     sep->sep_index,
+		     buf, sep->sep_index,
+		     TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
+		     TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+		spin_lock(&sep->sep_lun->lun_sep_lock);
+		num_cmds = sep->sep_stats.cmd_pdus;
+		rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
+		tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
+		spin_unlock(&sep->sep_lun->lun_sep_lock);
+
+		seq_printf(seq, "%llu %u %u %u\n", num_cmds,
+			rx_mbytes, tx_mbytes, 0);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_tgt_port_seq_ops = {
+	.start  = scsi_tgt_port_seq_start,
+	.next   = scsi_tgt_port_seq_next,
+	.stop   = scsi_tgt_port_seq_stop,
+	.show   = scsi_tgt_port_seq_show
+};
+
+static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_tgt_port_seq_ops);
+}
+
+static const struct file_operations scsi_tgt_port_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_tgt_port_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Authorized Initiator Table:
+ * It contains the SCSI Initiators authorized to be attached to one of the
+ * local Target ports.
+ * Iterates through all active TPGs and extracts the info from the ACLs
+ */
+static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	spin_lock_bh(&se_global->se_tpg_lock);
+	return seq_list_start(&se_global->g_se_tpg_list, *pos);
+}
+
+static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
+					 loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
+}
+
+static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock_bh(&se_global->se_tpg_lock);
+}
+
+static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
+						se_tpg_list);
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_node_acl *se_nacl;
+	int j;
+
+	if (list_is_first(&se_tpg->se_tpg_list,
+			  &se_global->g_se_tpg_list))
+		seq_puts(seq, "inst dev port indx dev_or_port intr_name "
+			 "map_indx att_count num_cmds read_mbytes "
+			 "write_mbytes hs_num_cmds creation_time row_status\n");
+
+	if (!(se_tpg))
+		return 0;
+
+	spin_lock(&se_tpg->acl_node_lock);
+	list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
+
+		atomic_inc(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&se_tpg->acl_node_lock);
+
+		spin_lock_irq(&se_nacl->device_list_lock);
+		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
+			deve = &se_nacl->device_list[j];
+			if (!(deve->lun_flags &
+					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
+			    (!deve->se_lun))
+				continue;
+			lun = deve->se_lun;
+			if (!lun->lun_se_dev)
+				continue;
+
+			seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
+					" %u %s\n",
+				/* scsiInstIndex */
+				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
+				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
+				0,
+				/* scsiDeviceIndex */
+				lun->lun_se_dev->dev_index,
+				/* scsiAuthIntrTgtPortIndex */
+				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
+				/* scsiAuthIntrIndex */
+				se_nacl->acl_index,
+				/* scsiAuthIntrDevOrPort */
+				1,
+				/* scsiAuthIntrName */
+				se_nacl->initiatorname[0] ?
+					se_nacl->initiatorname : NONE,
+				/* FIXME: scsiAuthIntrLunMapIndex */
+				0,
+				/* scsiAuthIntrAttachedTimes */
+				deve->attach_count,
+				/* scsiAuthIntrOutCommands */
+				deve->total_cmds,
+				/* scsiAuthIntrReadMegaBytes */
+				(u32)(deve->read_bytes >> 20),
+				/* scsiAuthIntrWrittenMegaBytes */
+				(u32)(deve->write_bytes >> 20),
+				/* FIXME: scsiAuthIntrHSOutCommands */
+				0,
+				/* scsiAuthIntrLastCreation */
+				(u32)(((u32)deve->creation_time -
+					    INITIAL_JIFFIES) * 100 / HZ),
+				/* FIXME: scsiAuthIntrRowStatus */
+				"Ready");
+		}
+		spin_unlock_irq(&se_nacl->device_list_lock);
+
+		spin_lock(&se_tpg->acl_node_lock);
+		atomic_dec(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&se_tpg->acl_node_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_auth_intr_seq_ops = {
+	.start	= scsi_auth_intr_seq_start,
+	.next	= scsi_auth_intr_seq_next,
+	.stop	= scsi_auth_intr_seq_stop,
+	.show	= scsi_auth_intr_seq_show
+};
+
+static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_auth_intr_seq_ops);
+}
+
+static const struct file_operations scsi_auth_intr_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_auth_intr_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Attached Initiator Port Table:
+ * It lists the SCSI Initiators attached to one of the local Target ports.
+ * Iterates through all active TPGs and uses active sessions from each TPG
+ * to list the info for this table.
+ */
+static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	spin_lock_bh(&se_global->se_tpg_lock);
+	return seq_list_start(&se_global->g_se_tpg_list, *pos);
+}
+
+static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
+					 loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
+}
+
+static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock_bh(&se_global->se_tpg_lock);
+}
+
+static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
+						se_tpg_list);
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_node_acl *se_nacl;
+	struct se_session *se_sess;
+	unsigned char buf[64];
+	int j;
+
+	if (list_is_first(&se_tpg->se_tpg_list,
+			  &se_global->g_se_tpg_list))
+		seq_puts(seq, "inst dev port indx port_auth_indx port_name"
+			" port_ident\n");
+
+	if (!(se_tpg))
+		return 0;
+
+	spin_lock(&se_tpg->session_lock);
+	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
+		if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
+		    (!se_sess->se_node_acl) ||
+		    (!se_sess->se_node_acl->device_list))
+			continue;
+
+		atomic_inc(&se_sess->mib_ref_count);
+		smp_mb__after_atomic_inc();
+		se_nacl = se_sess->se_node_acl;
+		atomic_inc(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&se_tpg->session_lock);
+
+		spin_lock_irq(&se_nacl->device_list_lock);
+		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
+			deve = &se_nacl->device_list[j];
+			if (!(deve->lun_flags &
+					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
+			   (!deve->se_lun))
+				continue;
+
+			lun = deve->se_lun;
+			if (!lun->lun_se_dev)
+				continue;
+
+			memset(buf, 0, 64);
+			if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
+				TPG_TFO(se_tpg)->sess_get_initiator_sid(
+					se_sess, (unsigned char *)&buf[0], 64);
+
+			seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
+				/* scsiInstIndex */
+				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
+				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
+				0,
+				/* scsiDeviceIndex */
+				lun->lun_se_dev->dev_index,
+				/* scsiPortIndex */
+				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
+				/* scsiAttIntrPortIndex */
+				(TPG_TFO(se_tpg)->sess_get_index != NULL) ?
+				TPG_TFO(se_tpg)->sess_get_index(se_sess) :
+				0,
+				/* scsiAttIntrPortAuthIntrIdx */
+				se_nacl->acl_index,
+				/* scsiAttIntrPortName */
+				se_nacl->initiatorname[0] ?
+					se_nacl->initiatorname : NONE,
+				/* scsiAttIntrPortIdentifier */
+				buf);
+		}
+		spin_unlock_irq(&se_nacl->device_list_lock);
+
+		spin_lock(&se_tpg->session_lock);
+		atomic_dec(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_dec();
+		atomic_dec(&se_sess->mib_ref_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&se_tpg->session_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_att_intr_port_seq_ops = {
+	.start	= scsi_att_intr_port_seq_start,
+	.next	= scsi_att_intr_port_seq_next,
+	.stop	= scsi_att_intr_port_seq_stop,
+	.show	= scsi_att_intr_port_seq_show
+};
+
+static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_att_intr_port_seq_ops);
+}
+
+static const struct file_operations scsi_att_intr_port_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_att_intr_port_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Logical Unit Table
+ */
+static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+#define SCSI_LU_INDEX		1
+static int scsi_lu_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	int j;
+	char str[28];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
+		" dev_type status state-bit num_cmds read_mbytes"
+		" write_mbytes resets full_stat hs_num_cmds creation_time\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	/* Fix LU state, if we can read it from the device */
+	seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
+			dev->dev_index, SCSI_LU_INDEX,
+			(unsigned long long)0, /* FIXME: scsiLuDefaultLun */
+			(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
+			/* scsiLuWwnName */
+			(char *)&DEV_T10_WWN(dev)->unit_serial[0] :
+			"None");
+
+	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+	/* scsiLuVendorId */
+	for (j = 0; j < 8; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
+			DEV_T10_WWN(dev)->vendor[j] : 0x20;
+	str[8] = 0;
+	seq_printf(seq, " %s", str);
+
+	/* scsiLuProductId */
+	for (j = 0; j < 16; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
+			DEV_T10_WWN(dev)->model[j] : 0x20;
+	str[16] = 0;
+	seq_printf(seq, " %s", str);
+
+	/* scsiLuRevisionId */
+	for (j = 0; j < 4; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
+			DEV_T10_WWN(dev)->revision[j] : 0x20;
+	str[4] = 0;
+	seq_printf(seq, " %s", str);
+
+	seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
+		/* scsiLuPeripheralType */
+		   TRANSPORT(dev)->get_device_type(dev),
+		   (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
+		"available" : "notavailable", /* scsiLuStatus */
+		"exposed", 	/* scsiLuState */
+		(unsigned long long)dev->num_cmds,
+		/* scsiLuReadMegaBytes */
+		(u32)(dev->read_bytes >> 20),
+		/* scsiLuWrittenMegaBytes */
+		(u32)(dev->write_bytes >> 20),
+		dev->num_resets, /* scsiLuInResets */
+		0, /* scsiLuOutTaskSetFullStatus */
+		0, /* scsiLuHSInCommands */
+		(u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
+							100 / HZ));
+
+	return 0;
+}
+
+static const struct seq_operations scsi_lu_seq_ops = {
+	.start  = scsi_lu_seq_start,
+	.next   = scsi_lu_seq_next,
+	.stop   = scsi_lu_seq_stop,
+	.show   = scsi_lu_seq_show
+};
+
+static int scsi_lu_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_lu_seq_ops);
+}
+
+static const struct file_operations scsi_lu_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_lu_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/****************************************************************************/
+
+/*
+ * Remove proc fs entries
+ */
+void remove_scsi_target_mib(void)
+{
+	remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_port", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
+	remove_proc_entry("scsi_target/mib", NULL);
+}
+
+/*
+ * Create proc fs entries for the mib tables
+ */
+int init_scsi_target_mib(void)
+{
+	struct proc_dir_entry *dir_entry;
+	struct proc_dir_entry *scsi_inst_entry;
+	struct proc_dir_entry *scsi_dev_entry;
+	struct proc_dir_entry *scsi_port_entry;
+	struct proc_dir_entry *scsi_transport_entry;
+	struct proc_dir_entry *scsi_tgt_dev_entry;
+	struct proc_dir_entry *scsi_tgt_port_entry;
+	struct proc_dir_entry *scsi_auth_intr_entry;
+	struct proc_dir_entry *scsi_att_intr_port_entry;
+	struct proc_dir_entry *scsi_lu_entry;
+
+	dir_entry = proc_mkdir("scsi_target/mib", NULL);
+	if (!(dir_entry)) {
+		printk(KERN_ERR "proc_mkdir() failed.\n");
+		return -1;
+	}
+
+	scsi_inst_entry =
+		create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
+	if (scsi_inst_entry)
+		scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
+	else
+		goto error;
+
+	scsi_dev_entry =
+		create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
+	if (scsi_dev_entry)
+		scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
+	else
+		goto error;
+
+	scsi_port_entry =
+		create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
+	if (scsi_port_entry)
+		scsi_port_entry->proc_fops = &scsi_port_seq_fops;
+	else
+		goto error;
+
+	scsi_transport_entry =
+		create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
+	if (scsi_transport_entry)
+		scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
+	else
+		goto error;
+
+	scsi_tgt_dev_entry =
+		create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
+	if (scsi_tgt_dev_entry)
+		scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
+	else
+		goto error;
+
+	scsi_tgt_port_entry =
+		create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
+	if (scsi_tgt_port_entry)
+		scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
+	else
+		goto error;
+
+	scsi_auth_intr_entry =
+		create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
+	if (scsi_auth_intr_entry)
+		scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
+	else
+		goto error;
+
+	scsi_att_intr_port_entry =
+	      create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
+	if (scsi_att_intr_port_entry)
+		scsi_att_intr_port_entry->proc_fops =
+				&scsi_att_intr_port_seq_fops;
+	else
+		goto error;
+
+	scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
+	if (scsi_lu_entry)
+		scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
+	else
+		goto error;
+
+	return 0;
+
+error:
+	printk(KERN_ERR "create_proc_entry() failed.\n");
+	remove_scsi_target_mib();
+	return -1;
+}
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables
+ */
+void init_scsi_index_table(void)
+{
+	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+	spin_lock_init(&scsi_index_table.lock);
+}
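+
+/*
+ * Hypothetical pairing sketch (the real call sites live elsewhere in the
+ * target core; this only illustrates the intended ordering): initialize
+ * the index table before exposing the procfs tables, and tear the procfs
+ * tree down again on module exit:
+ *
+ *	init_scsi_index_table();
+ *	if (init_scsi_target_mib() < 0)
+ *		return -ENOMEM;
+ *	...
+ *	remove_scsi_target_mib();
+ */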
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+	u32 new_index;
+
+	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+		printk(KERN_ERR "Invalid index type %d\n", type);
+		return -1;
+	}
+
+	spin_lock(&scsi_index_table.lock);
+	new_index = ++scsi_index_table.scsi_mib_index[type];
+	if (new_index == 0)
+		new_index = ++scsi_index_table.scsi_mib_index[type];
+	spin_unlock(&scsi_index_table.lock);
+
+	return new_index;
+}
+EXPORT_SYMBOL(scsi_get_new_index);
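+
+/*
+ * Usage sketch (hypothetical caller; the real call sites live in other
+ * files): MIB row indexes are allocated per entry type, and a valid index
+ * is never zero because the counter skips 0 on 32-bit wrap:
+ *
+ *	u32 dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ */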
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
new file mode 100644
index 0000000..2772046
--- /dev/null
+++ b/drivers/target/target_core_mib.h
@@ -0,0 +1,28 @@
+#ifndef TARGET_CORE_MIB_H
+#define TARGET_CORE_MIB_H
+
+typedef enum {
+	SCSI_INST_INDEX,
+	SCSI_DEVICE_INDEX,
+	SCSI_AUTH_INTR_INDEX,
+	SCSI_INDEX_TYPE_MAX
+} scsi_index_t;
+
+struct scsi_index_table {
+	spinlock_t	lock;
+	u32 		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+} ____cacheline_aligned;
+
+/* SCSI Port stats */
+struct scsi_port_stats {
+	u64	cmd_pdus;
+	u64	tx_data_octets;
+	u64	rx_data_octets;
+} ____cacheline_aligned;
+
+extern int init_scsi_target_mib(void);
+extern void remove_scsi_target_mib(void);
+extern void init_scsi_index_table(void);
+extern u32 scsi_get_new_index(scsi_index_t);
+
+#endif   /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 0000000..2521f75
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4252 @@
+/*******************************************************************************
+ * Filename:  target_core_pr.c
+ *
+ * This file contains SPC-3 compliant persistent reservations and
+ * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
+ *
+ * Copyright (c) 2009, 2010 Rising Tide Systems
+ * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+/*
+ * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
+ */
+struct pr_transport_id_holder {
+	int dest_local_nexus;
+	struct t10_pr_registration *dest_pr_reg;
+	struct se_portal_group *dest_tpg;
+	struct se_node_acl *dest_node_acl;
+	struct se_dev_entry *dest_se_deve;
+	struct list_head dest_list;
+};
+
+int core_pr_dump_initiator_port(
+	struct t10_pr_registration *pr_reg,
+	char *buf,
+	u32 size)
+{
+	if (!(pr_reg->isid_present_at_reg))
+		return 0;
+
+	snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
+	return 1;
+}
+
+static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+			struct t10_pr_registration *, int);
+
+static int core_scsi2_reservation_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	switch (cdb[0]) {
+	case INQUIRY:
+	case RELEASE:
+	case RELEASE_10:
+		return 0;
+	default:
+		return 1;
+	}
+
+	return 1;
+}
+
+static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!(sess))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -1;
+	}
+	if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
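+	/* ISID-qualified reservation: conflict unless the session's ISID matches */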
+	ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static int core_scsi2_reservation_release(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+
+	if (!(sess) || !(tpg))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+
+	if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	dev->dev_reserved_node_acl = NULL;
+	dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+		dev->dev_res_bin_isid = 0;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
+		" MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return 0;
+}
+
+static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+
+	if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
+	    (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
+		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
+				" ILLEGAL_REQUEST\n");
+		return PYX_TRANSPORT_ILLEGAL_REQUEST;
+	}
+	/*
+	 * This is currently the case for target_core_mod passthrough struct se_cmd
+	 * ops
+	 */
+	if (!(sess) || !(tpg))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_reserved_node_acl &&
+	   (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+		printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+			TPG_TFO(tpg)->get_fabric_name());
+		printk(KERN_ERR "Original reserver LUN: %u %s\n",
+			SE_LUN(cmd)->unpacked_lun,
+			dev->dev_reserved_node_acl->initiatorname);
+		printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
+			" from %s \n", SE_LUN(cmd)->unpacked_lun,
+			cmd->se_deve->mapped_lun,
+			sess->se_node_acl->initiatorname);
+		spin_unlock(&dev->dev_reservation_lock);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+	dev->dev_reserved_node_acl = sess->se_node_acl;
+	dev->dev_flags |= DF_SPC2_RESERVATIONS;
+	if (sess->sess_bin_isid != 0) {
+		dev->dev_res_bin_isid = sess->sess_bin_isid;
+		dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+		" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return 0;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+					struct se_node_acl *, struct se_session *);
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+/*
+ * Set up in target_core_transport.c:transport_generic_cmd_sequencer()
+ * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
+ * thread context.
+ */
+int core_scsi2_emulate_crh(struct se_cmd *cmd)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+	struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
+	unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+	int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+	int conflict = 0;
+
+	if (!(se_sess))
+		return 0;
+
+	if (!(crh))
+		goto after_crh;
+
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+			se_sess);
+	if (pr_reg) {
+		/*
+		 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
+		 * behavior
+		 *
+		 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
+		 * status, but no reservation shall be established and the
+		 * persistent reservation shall not be changed, if the command
+		 * is received from a) and b) below.
+		 *
+		 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
+		 * status, but the persistent reservation shall not be released,
+		 * if the command is received from a) and b)
+		 *
+		 * a) An I_T nexus that is a persistent reservation holder; or
+		 * b) An I_T nexus that is registered if a registrants only or
+		 *    all registrants type persistent reservation is present.
+		 *
+		 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
+		 * RELEASE(6) command, or RELEASE(10) command shall be processed
+		 * as defined in SPC-2.
+		 */
+		if (pr_reg->pr_res_holder) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 0;
+		}
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 0;
+		}
+		core_scsi3_put_pr_reg(pr_reg);
+		conflict = 1;
+	} else {
+		/*
+		 * Following spc2r20 5.5.1 Reservations overview:
+		 *
+		 * If a logical unit has executed a PERSISTENT RESERVE OUT
+		 * command with the REGISTER or the REGISTER AND IGNORE
+		 * EXISTING KEY service action and is still registered by any
+		 * initiator, all RESERVE commands and all RELEASE commands
+		 * regardless of initiator shall conflict and shall terminate
+		 * with a RESERVATION CONFLICT status.
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+
+	if (conflict) {
+		printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+			" while active SPC-3 registrations exist,"
+			" returning RESERVATION_CONFLICT\n");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+after_crh:
+	if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
+		return core_scsi2_reservation_reserve(cmd);
+	else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
+		return core_scsi2_reservation_release(cmd);
+	else
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
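+
+/*
+ * Illustrative wiring sketch only (the sequencer itself is not part of
+ * this hunk): per the comment above, transport_generic_cmd_sequencer()
+ * is expected to install this handler for the SPC-2 reservation opcodes
+ * roughly as follows:
+ *
+ *	case RESERVE:
+ *	case RESERVE_10:
+ *	case RELEASE:
+ *	case RELEASE_10:
+ *		cmd->transport_emulate_cdb = core_scsi2_emulate_crh;
+ *		break;
+ */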
+
+/*
+ * Begin SPC-3/SPC-4 Persistent Reservations emulation support
+ *
+ * This function is called by those initiator ports who are *NOT*
+ * the active PR reservation holder when a reservation is present.
+ */
+static int core_scsi3_pr_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	struct se_dev_entry *se_deve;
+	struct se_session *se_sess = SE_SESS(cmd);
+	int other_cdb = 0, ignore_reg;
+	int registered_nexus = 0, ret = 1; /* Conflict by default */
+	int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
+	int we = 0; /* Write Exclusive */
+	int legacy = 0; /* Act like a legacy device and return
+			 * RESERVATION CONFLICT on some CDBs */
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_seq_non_holder(cmd,
+					cdb, pr_reg_type);
+
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Determine if the registration should be ignored due to
+	 * non-matching ISIDs in core_scsi3_pr_reservation_check().
+	 */
+	ignore_reg = (pr_reg_type & 0x80000000);
+	if (ignore_reg)
+		pr_reg_type &= ~0x80000000;
+
+	switch (pr_reg_type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		we = 1;
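+		/* fall through */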
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		/*
+		 * Some commands are only allowed for the persistent reservation
+		 * holder.
+		 */
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		we = 1;
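+		/* fall through */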
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		/*
+		 * Some commands are only allowed for registered I_T Nexuses.
+		 */
+		reg_only = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		we = 1;
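+		/* fall through */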
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		/*
+		 * Each registered I_T Nexus is a reservation holder.
+		 */
+		all_reg = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	default:
+		return -1;
+	}
+	/*
+	 * Referenced from spc4r17 table 45 for *NON* PR holder access
+	 */
+	switch (cdb[0]) {
+	case SECURITY_PROTOCOL_IN:
+		if (registered_nexus)
+			return 0;
+		ret = (we) ? 0 : 1;
+		break;
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case READ_ATTRIBUTE:
+	case READ_BUFFER:
+	case RECEIVE_DIAGNOSTIC:
+		if (legacy) {
+			ret = 1;
+			break;
+		}
+		if (registered_nexus) {
+			ret = 0;
+			break;
+		}
+		ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+		break;
+	case PERSISTENT_RESERVE_OUT:
+		/*
+		 * This follows PERSISTENT_RESERVE_OUT service actions that
+		 * are allowed in the presence of various reservations.
+		 * See spc4r17, table 46
+		 */
+		switch (cdb[1] & 0x1f) {
+		case PRO_CLEAR:
+		case PRO_PREEMPT:
+		case PRO_PREEMPT_AND_ABORT:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		case PRO_REGISTER:
+		case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+			ret = 0;
+			break;
+		case PRO_REGISTER_AND_MOVE:
+		case PRO_RESERVE:
+			ret = 1;
+			break;
+		case PRO_RELEASE:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		default:
+			printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+				" action: 0x%02x\n", cdb[1] & 0x1f);
+			return -1;
+		}
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
+		ret = 0;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
+		ret = 0;
+		break;
+	case TEST_UNIT_READY:
+		ret = (legacy) ? 1 : 0; /* Conflict for legacy */
+		break;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_MANAGEMENT_PROTOCOL_IN:
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_SUPPORTED_OPERATION_CODES:
+		case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+			if (legacy) {
+				ret = 1;
+				break;
+			}
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_ALIASES:
+		case MI_REPORT_IDENTIFYING_INFORMATION:
+		case MI_REPORT_PRIORITY:
+		case MI_REPORT_TARGET_PGS:
+		case MI_REPORT_TIMESTAMP:
+			ret = 0; /* Allowed */
+			break;
+		default:
+			printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+				(cdb[1] & 0x1f));
+			return -1;
+		}
+		break;
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case INQUIRY:
+	case LOG_SENSE:
+	case READ_MEDIA_SERIAL_NUMBER:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		ret = 0; /* Allowed CDBs */
+		break;
+	default:
+		other_cdb = 1;
+		break;
+	}
+	/*
+	 * Case where the CDB is explicitly allowed in the above switch
+	 * statement.
+	 */
+	if (!(ret) && !(other_cdb)) {
+#if 0
+		printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+			" reservation holder\n", cdb[0],
+			core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+		return ret;
+	}
+	/*
+	 * Check if write exclusive initiator ports *NOT* holding the
+	 * WRITE_EXCLUSIVE_* reservation.
+	 */
+	if ((we) && !(registered_nexus)) {
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			/*
+			 * Conflict for write exclusive
+			 */
+			printk(KERN_INFO "%s Conflict for unregistered nexus"
+				" %s CDB: 0x%02x to %s reservation\n",
+				transport_dump_cmd_direction(cmd),
+				se_sess->se_node_acl->initiatorname, cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+			return 1;
+		} else {
+			/*
+			 * Allow non-WRITE CDBs for all Write Exclusive
+			 * PR TYPEs to pass for registered and
+			 * non-registered nexuses NOT holding the reservation.
+			 *
+			 * We only make noise for the unregistered nexuses,
+			 * as we expect registered non-reservation holding
+			 * nexuses to issue CDBs.
+			 */
+#if 0
+			if (!(registered_nexus)) {
+				printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+					" for %s reservation on unregistered"
+					" nexus\n", cdb[0],
+					core_scsi3_pr_dump_type(pr_reg_type));
+			}
+#endif
+			return 0;
+		}
+	} else if ((reg_only) || (all_reg)) {
+		if (registered_nexus) {
+			/*
+			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+			 * allow commands from registered nexuses.
+			 */
+#if 0
+			printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+			return 0;
+		}
+	}
+	printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+		" for %s reservation\n", transport_dump_cmd_direction(cmd),
+		(registered_nexus) ? "" : "un",
+		se_sess->se_node_acl->initiatorname, cdb[0],
+		core_scsi3_pr_dump_type(pr_reg_type));
+
+	return 1; /* Conflict by default */
+}
+
+static u32 core_scsi3_pr_generation(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	u32 prg;
+	/*
+	 * PRGeneration field shall contain the value of a 32-bit wrapping
+	 * counter maintained by the device server.
+	 *
+	 * Note that this is done regardless of Active Persist across
+	 * Target PowerLoss (APTPL)
+	 *
+	 * See spc4r17 section 6.3.12 READ_KEYS service action
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	prg = T10_RES(su_dev)->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return prg;
+}
+
+static int core_scsi3_pr_reservation_check(
+	struct se_cmd *cmd,
+	u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!(sess))
+		return 0;
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_check(cmd, pr_reg_type);
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!(dev->dev_pr_res_holder)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	*pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+	cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+	if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -1;
+	}
+	if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
+	       sess->sess_bin_isid) ? 0 : -1;
+	/*
+	 * Use bit in *pr_reg_type to notify ISID mismatch in
+	 * core_scsi3_pr_seq_non_holder().
+	 */
+	if (ret != 0)
+		*pr_reg_type |= 0x80000000;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+		return NULL;
+	}
+
+	pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+					GFP_ATOMIC);
+	if (!(pr_reg->pr_aptpl_buf)) {
+		printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = nacl;
+	pr_reg->pr_reg_deve = deve;
+	pr_reg->pr_res_mapped_lun = deve->mapped_lun;
+	pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = aptpl;
+	pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
+	/*
+	 * If an ISID value for this SCSI Initiator Port exists,
+	 * save it to the registration now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+
+	return pr_reg;
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
+
+/*
+ * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
+ * modes.
+ */
+static struct t10_pr_registration *__core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_dev_entry *deve_tmp;
+	struct se_node_acl *nacl_tmp;
+	struct se_port *port, *port_tmp;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
+	int ret;
+	/*
+	 * Create a registration for the I_T Nexus upon which the
+	 * PROUT REGISTER was received.
+	 */
+	pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!(pr_reg))
+		return NULL;
+	/*
+	 * Return pointer to pr_reg for ALL_TG_PT=0
+	 */
+	if (!(all_tg_pt))
+		return pr_reg;
+	/*
+	 * Create list of matching SCSI Initiator Port registrations
+	 * for ALL_TG_PT=1
+	 */
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
+		atomic_inc(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(deve_tmp, &port->sep_alua_list,
+					alua_port_list) {
+			/*
+			 * This pointer will be NULL for demo mode MappedLUNs
+			 * that have not been made explicit via a ConfigFS
+			 * MappedLUN group for the SCSI Initiator Node ACL.
+			 */
+			if (!(deve_tmp->se_lun_acl))
+				continue;
+
+			nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+			/*
+			 * Skip the matching struct se_node_acl that is allocated
+			 * above..
+			 */
+			if (nacl == nacl_tmp)
+				continue;
+			/*
+			 * Only perform PR registrations for target ports on
+			 * the same fabric module as the one on which the
+			 * REGISTER w/ ALL_TG_PT=1 arrived.
+			 */
+			if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
+				continue;
+			/*
+			 * Look for a matching Initiator Node ACL in ASCII format
+			 */
+			if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
+				continue;
+
+			atomic_inc(&deve_tmp->pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock_bh(&port->sep_alua_lock);
+			/*
+			 * Grab a configfs group dependency that is released
+			 * for the exception path at label out: below, or upon
+			 * completion of adding ALL_TG_PT=1 registrations in
+			 * __core_scsi3_add_registration()
+			 */
+			ret = core_scsi3_lunacl_depend_item(deve_tmp);
+			if (ret < 0) {
+				printk(KERN_ERR "core_scsi3_lunacl_depend"
+						"_item() failed\n");
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				goto out;
+			}
+			/*
+			 * Located a matching SCSI Initiator Port on a different
+			 * port, allocate the pr_reg_atp and attach it to the
+			 * pr_reg->pr_reg_atp_list that will be processed once
+			 * the original *pr_reg is processed in
+			 * __core_scsi3_add_registration()
+			 */
+			pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
+						nacl_tmp, deve_tmp, NULL,
+						sa_res_key, all_tg_pt, aptpl);
+			if (!(pr_reg_atp)) {
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_lunacl_undepend_item(deve_tmp);
+				goto out;
+			}
+
+			list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
+				      &pr_reg->pr_reg_atp_list);
+			spin_lock_bh(&port->sep_alua_lock);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&dev->se_port_lock);
+		atomic_dec(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return pr_reg;
+out:
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+	}
+	kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	return NULL;
+}
+
+int core_scsi3_alloc_aptpl_registration(
+	struct t10_reservation_template *pr_tmpl,
+	u64 sa_res_key,
+	unsigned char *i_port,
+	unsigned char *isid,
+	u32 mapped_lun,
+	unsigned char *t_port,
+	u16 tpgt,
+	u32 target_lun,
+	int res_holder,
+	int all_tg_pt,
+	u8 type)
+{
+	struct t10_pr_registration *pr_reg;
+
+	if (!(i_port) || !(t_port) || !(sa_res_key)) {
+		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+		return -1;
+	}
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+		return -1;
+	}
+	pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
+	if (!(pr_reg->pr_aptpl_buf)) {
+		printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return -1;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = NULL;
+	pr_reg->pr_reg_deve = NULL;
+	pr_reg->pr_res_mapped_lun = mapped_lun;
+	pr_reg->pr_aptpl_target_lun = target_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = 1;
+	pr_reg->pr_reg_tg_pt_lun = NULL;
+	pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
+	pr_reg->pr_res_type = type;
+	/*
+	 * If an ISID value had been saved in APTPL metadata for this
+	 * SCSI Initiator Port, restore it now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+	/*
+	 * Copy the i_port and t_port information from caller.
+	 */
+	snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
+	snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
+	pr_reg->pr_reg_tpgt = tpgt;
+	/*
+	 * Set pr_res_holder from the caller; the pr_reg that is the reservation
+	 * holder will get its pointer set in core_scsi3_aptpl_reserve() once
+	 * the Initiator Node LUN ACL from the fabric module is created for
+	 * this registration.
+	 */
+	pr_reg->pr_res_holder = res_holder;
+
+	list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
+	printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+			" metadata\n", (res_holder) ? "+reservation" : "");
+	return 0;
+}
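+
+/*
+ * APTPL round-trip, in brief (a sketch of the flow implemented below):
+ * the registration allocated above parks on pr_tmpl->aptpl_reg_list until
+ * a matching MappedLUN ACL appears, at which point
+ * __core_scsi3_check_aptpl_registration() matches on
+ * (pr_iport, pr_tport, pr_reg_tpgt, pr_res_mapped_lun, pr_aptpl_target_lun)
+ * and promotes it to a live registration (and reservation, if it was the
+ * holder).
+ */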
+
+static void core_scsi3_aptpl_reserve(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_node_acl *node_acl,
+	struct t10_pr_registration *pr_reg)
+{
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	dev->dev_pr_res_holder = pr_reg;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+		" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		TPG_TFO(tpg)->get_fabric_name(),
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+		TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+}
+
+static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
+				struct t10_pr_registration *, int, int);
+
+static int __core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 target_lun,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
+	unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+	u16 tpgt;
+
+	memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+	memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+	/*
+	 * Copy Initiator Port information from struct se_node_acl
+	 */
+	snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
+	snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
+			TPG_TFO(tpg)->tpg_get_wwn(tpg));
+	tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+	/*
+	 * Look for the matching registrations+reservation from those
+	 * created from APTPL metadata.  Note that multiple registrations
+	 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
+	 * TransportIDs.
+	 */
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+		     (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+		    !(strcmp(pr_reg->pr_tport, t_port)) &&
+		     (pr_reg->pr_reg_tpgt == tpgt) &&
+		     (pr_reg->pr_aptpl_target_lun == target_lun)) {
+
+			pr_reg->pr_reg_nacl = nacl;
+			pr_reg->pr_reg_deve = deve;
+			pr_reg->pr_reg_tg_pt_lun = lun;
+
+			list_del(&pr_reg->pr_reg_aptpl_list);
+			spin_unlock(&pr_tmpl->aptpl_reg_lock);
+			/*
+			 * At this point all of the pointers in *pr_reg will
+			 * be setup, so go ahead and add the registration.
+			 */
+
+			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
+			/*
+			 * If this registration is the reservation holder,
+			 * make that happen now..
+			 */
+			if (pr_reg->pr_res_holder)
+				core_scsi3_aptpl_reserve(dev, tpg,
+						nacl, pr_reg);
+			/*
+			 * Reenable pr_aptpl_active to accept new metadata
+			 * updates once the SCSI device is active again..
+			 */
+			spin_lock(&pr_tmpl->aptpl_reg_lock);
+			pr_tmpl->pr_aptpl_active = 1;
+		}
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+
+	return 0;
+}
+
+int core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+	struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
+				lun->unpacked_lun, nacl, deve);
+}
+
+static void __core_scsi3_dump_registration(
+	struct target_core_fabric_ops *tfo,
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type)
+{
+	struct se_portal_group *se_tpg = nacl->se_tpg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
+		"_AND_MOVE" : (register_type == 1) ?
+		"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
+		(prf_isid) ? i_buf : "");
+	printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+		 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+		tfo->tpg_get_tag(se_tpg));
+	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n",  tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		TRANSPORT(dev)->name);
+	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x  APTPL: %d\n", tfo->get_fabric_name(),
+		pr_reg->pr_res_key, pr_reg->pr_res_generation,
+		pr_reg->pr_reg_aptpl);
+}
+
+/*
+ * this function can be called with struct se_device->dev_reservation_lock
+ * when register_move = 1
+ */
+static void __core_scsi3_add_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type,
+	int register_move)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+
+	/*
+	 * Increment PRgeneration counter for struct se_device upon a successful
+	 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
+	 *
+	 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
+	 * action, the struct se_device->dev_reservation_lock will already be held,
+	 * so we do not call core_scsi3_pr_generation() which grabs the lock
+	 * for the REGISTER.
+	 */
+	pr_reg->pr_res_generation = (register_move) ?
+			T10_RES(su_dev)->pr_generation++ :
+			core_scsi3_pr_generation(dev);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
+	pr_reg->pr_reg_deve->def_pr_registered = 1;
+
+	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
+	 */
+	if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+		return;
+	/*
+	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
+	 * allocated in __core_scsi3_alloc_registration()
+	 */
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+
+		pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		list_add_tail(&pr_reg_tmp->pr_reg_list,
+			      &pr_tmpl->registration_list);
+		pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
+
+		__core_scsi3_dump_registration(tfo, dev,
+				pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
+				register_type);
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Drop configfs group dependency reference from
+		 * __core_scsi3_alloc_registration()
+		 */
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+	}
+}
+
+static int core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl,
+	int register_type,
+	int register_move)
+{
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!(pr_reg))
+		return -1;
+
+	__core_scsi3_add_registration(dev, nacl, pr_reg,
+			register_type, register_move);
+	return 0;
+}
+
+static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	unsigned char *isid)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct se_portal_group *tpg;
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+		/*
+		 * First look for a matching struct se_node_acl
+		 */
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		/*
+		 * If this registration does NOT contain a fabric provided
+		 * ISID, then we have found a match.
+		 */
+		if (!(pr_reg->isid_present_at_reg)) {
+			/*
+			 * Determine if this SCSI device server requires that
+			 * SCSI Initiator TransportID w/ ISIDs is enforced
+			 * for fabric modules (iSCSI) requiring them.
+			 */
+			if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+				if (DEV_ATTRIB(dev)->enforce_pr_isids)
+					continue;
+			}
+			atomic_inc(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&pr_tmpl->registration_lock);
+			return pr_reg;
+		}
+		/*
+		 * If the *pr_reg contains a fabric defined ISID for multi-value
+		 * SCSI Initiator Port TransportIDs, then we expect a valid
+		 * matching ISID to be provided by the local SCSI Initiator Port.
+		 */
+		if (!(isid))
+			continue;
+		if (strcmp(isid, pr_reg->pr_reg_isid))
+			continue;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
+		return pr_reg;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	return NULL;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_session *sess)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+	unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+
+	if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+		memset(&buf[0], 0, PR_REG_ISID_LEN);
+		TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+					PR_REG_ISID_LEN);
+		isid_ptr = &buf[0];
+	}
+
+	return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
+}
+
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
+{
+	atomic_dec(&pr_reg->pr_res_holders);
+	smp_mb__after_atomic_dec();
+}
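+
+/*
+ * Reference pairing sketch: every successful core_scsi3_locate_pr_reg()
+ * must be balanced by core_scsi3_put_pr_reg(), e.g. (mirroring the CRH
+ * check in core_scsi2_emulate_crh() above):
+ *
+ *	pr_reg = core_scsi3_locate_pr_reg(dev, nacl, sess);
+ *	if (pr_reg) {
+ *		... inspect pr_reg->pr_res_type / pr_res_key ...
+ *		core_scsi3_put_pr_reg(pr_reg);
+ *	}
+ */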
+
+static int core_scsi3_check_implict_release(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct t10_pr_registration *pr_res_holder;
+	int ret = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return ret;
+	}
+	if (pr_res_holder == pr_reg) {
+		/*
+		 * Perform an implicit RELEASE if the registration that
+		 * is being released is holding the reservation.
+		 *
+		 * From spc4r17, section 5.7.11.1:
+		 *
+		 * e) If the I_T nexus is the persistent reservation holder
+		 *    and the persistent reservation is not an all registrants
+		 *    type, then a PERSISTENT RESERVE OUT command with REGISTER
+		 * service action or REGISTER AND IGNORE EXISTING KEY
+		 *    service action with the SERVICE ACTION RESERVATION KEY
+		 *    field set to zero (see 5.7.11.3).
+		 */
+		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+		ret = 1;
+		/*
+		 * For 'All Registrants' reservation types, all existing
+		 * registrations are still processed as reservation holders
+		 * in core_scsi3_pr_seq_non_holder() after the initial
+		 * reservation holder is implicitly released here.
+		 */
+	} else if (pr_reg->pr_reg_all_tg_pt &&
+		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
+			  pr_reg->pr_reg_nacl->initiatorname)) &&
+		  (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+			" UNREGISTER while existing reservation with matching"
+			" key 0x%016Lx is present from another SCSI Initiator"
+			" Port\n", pr_reg->pr_res_key);
+		ret = -1;
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct t10_reservation_template->registration_lock held.
+ */
+static void __core_scsi3_free_registration(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int dec_holders)
+{
+	struct target_core_fabric_ops *tfo =
+			pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	pr_reg->pr_reg_deve->def_pr_registered = 0;
+	pr_reg->pr_reg_deve->pr_res_key = 0;
+	list_del(&pr_reg->pr_reg_list);
+	/*
+	 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
+	 * so call core_scsi3_put_pr_reg() to decrement our reference.
+	 */
+	if (dec_holders)
+		core_scsi3_put_pr_reg(pr_reg);
+	/*
+	 * Wait until all references from any other I_T nexuses for this
+	 * *pr_reg have been released.  Because list_del() was called above,
+	 * no new references can be taken; once the last
+	 * core_scsi3_put_pr_reg() drops the count to zero, we can free *pr_reg.
+	 */
+	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+		spin_unlock(&pr_tmpl->registration_lock);
+		printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+				tfo->get_fabric_name());
+		cpu_relax();
+		spin_lock(&pr_tmpl->registration_lock);
+	}
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(),
+		pr_reg->pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n", tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		TRANSPORT(dev)->name);
+	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+		pr_reg->pr_res_generation);
+
+	if (!(preempt_and_abort_list)) {
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return;
+	}
+	/*
+	 * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list
+	 * are released once the ABORT_TASK_SET has completed..
+	 */
+	list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+	struct se_device *dev,
+	struct se_node_acl *nacl)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+	/*
+	 * If the passed se_node_acl matches the reservation holder,
+	 * release the reservation.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder != NULL) &&
+	    (pr_res_holder->pr_reg_nacl == nacl))
+		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Release any registration associated with the struct se_node_acl.
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+}
+
+void core_scsi3_free_all_registrations(
+	struct se_device *dev)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder != NULL) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+				pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		list_del(&pr_reg->pr_reg_aptpl_list);
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+}
+
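+/*
+ * The configfs depend/undepend helpers below pin the fabric configfs
+ * items (TPG, NodeACL and MappedLUN ACL) so that they cannot be removed
+ * via rmdir(2) while an outstanding SPC-3 PR registration still
+ * references them.
+ */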
+static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
+{
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&tpg->tpg_group.cg_item);
+}
+
+static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
+{
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&tpg->tpg_group.cg_item);
+
+	atomic_dec(&tpg->tpg_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl)
+		return 0;
+
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&nacl->acl_group.cg_item);
+}
+
+static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl) {
+		atomic_dec(&nacl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&nacl->acl_group.cg_item);
+
+	atomic_dec(&nacl->acl_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!(lun_acl))
+		return 0;
+
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+}
+
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!(lun_acl)) {
+		atomic_dec(&se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+
+	atomic_dec(&se_deve->pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_decode_spec_i_port(
+	struct se_cmd *cmd,
+	struct se_portal_group *tpg,
+	unsigned char *l_isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_port *tmp_port;
+	struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_node_acl *dest_node_acl = NULL;
+	struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+	struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct list_head tid_dest_list;
+	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+	struct target_core_fabric_ops *tmp_tf_ops;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tpdl, tid_len = 0;
+	int ret, dest_local_nexus, prf_isid;
+	u32 dest_rtpi = 0;
+
+	memset(dest_iport, 0, 64);
+	INIT_LIST_HEAD(&tid_dest_list);
+
+	local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Allocate a struct pr_transport_id_holder, set up the
+	 * local_node_acl and local_se_deve pointers, and add it to
+	 * struct list_head tid_dest_list for the registration
+	 * processing loop over tid_dest_list below.
+	 */
+	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+	if (!(tidh_new)) {
+		printk(KERN_ERR "Unable to allocate tidh_new\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	INIT_LIST_HEAD(&tidh_new->dest_list);
+	tidh_new->dest_tpg = tpg;
+	tidh_new->dest_node_acl = se_sess->se_node_acl;
+	tidh_new->dest_se_deve = local_se_deve;
+
+	local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+				se_sess->se_node_acl, local_se_deve, l_isid,
+				sa_res_key, all_tg_pt, aptpl);
+	if (!(local_pr_reg)) {
+		kfree(tidh_new);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	tidh_new->dest_pr_reg = local_pr_reg;
+	/*
+	 * The local I_T nexus does not hold any configfs dependencies,
+	 * so we set tid_h->dest_local_nexus=1 to prevent the
+	 * configfs_undepend_item() calls in the tid_dest_list loops below.
+	 */
+	tidh_new->dest_local_nexus = 1;
+	list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+	/*
+	 * For a PERSISTENT RESERVE OUT SPEC_I_PT (specify initiator ports)
+	 * payload, first extract the TransportID Parameter Data Length, and
+	 * make sure the value matches the SCSI expected data transfer length.
+	 */
+	tpdl = (buf[24] & 0xff) << 24;
+	tpdl |= (buf[25] & 0xff) << 16;
+	tpdl |= (buf[26] & 0xff) << 8;
+	tpdl |= buf[27] & 0xff;
+
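+	/*
+	 * With SPEC_I_PT=1 the PROUT parameter data carries the TransportID
+	 * parameter data length in bytes 24-27, with the TransportIDs
+	 * themselves starting at byte 28; hence the (tpdl + 28) check
+	 * against the expected data transfer length below.
+	 */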
+	if ((tpdl + 28) != cmd->data_length) {
+		printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+			" does not equal CDB data_length: %u\n", tpdl,
+			cmd->data_length);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	/*
+	 * Start processing the received transport IDs using the
+	 * receiving I_T Nexus portal's fabric dependent methods to
+	 * obtain the SCSI Initiator Port/Device Identifiers.
+	 */
+	ptr = &buf[28];
+
+	while (tpdl > 0) {
+		proto_ident = (ptr[0] & 0x0f);
+		dest_tpg = NULL;
+
+		spin_lock(&dev->se_port_lock);
+		list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
+			tmp_tpg = tmp_port->sep_tpg;
+			if (!(tmp_tpg))
+				continue;
+			tmp_tf_ops = TPG_TFO(tmp_tpg);
+			if (!(tmp_tf_ops))
+				continue;
+			if (!(tmp_tf_ops->get_fabric_proto_ident) ||
+			    !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+				continue;
+			/*
+			 * Look for the matching proto_ident provided by
+			 * the received TransportID
+			 */
+			tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
+			if (tmp_proto_ident != proto_ident)
+				continue;
+			dest_rtpi = tmp_port->sep_rtpi;
+
+			i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
+					tmp_tpg, (const char *)ptr, &tid_len,
+					&iport_ptr);
+			if (!(i_str))
+				continue;
+
+			atomic_inc(&tmp_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&dev->se_port_lock);
+
+			ret = core_scsi3_tpg_depend_item(tmp_tpg);
+			if (ret != 0) {
+				printk(KERN_ERR "core_scsi3_tpg_depend_item()"
+					" failed for tmp_tpg\n");
+				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				goto out;
+			}
+			/*
+			 * Locate the destination initiator ACL to be registered
+			 * from the decoded fabric module specific TransportID
+			 * at *i_str.
+			 */
+			spin_lock_bh(&tmp_tpg->acl_node_lock);
+			dest_node_acl = __core_tpg_get_initiator_node_acl(
+						tmp_tpg, i_str);
+			if (dest_node_acl) {
+				atomic_inc(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_inc();
+			}
+			spin_unlock_bh(&tmp_tpg->acl_node_lock);
+
+			if (!(dest_node_acl)) {
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				spin_lock(&dev->se_port_lock);
+				continue;
+			}
+
+			ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+			if (ret != 0) {
+				printk(KERN_ERR "configfs_depend_item() failed"
+					" for dest_node_acl->acl_group\n");
+				atomic_dec(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				goto out;
+			}
+
+			dest_tpg = tmp_tpg;
+			printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+				" %s Port RTPI: %hu\n",
+				TPG_TFO(dest_tpg)->get_fabric_name(),
+				dest_node_acl->initiatorname, dest_rtpi);
+
+			spin_lock(&dev->se_port_lock);
+			break;
+		}
+		spin_unlock(&dev->se_port_lock);
+
+		if (!(dest_tpg)) {
+			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+					" dest_tpg\n");
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+#if 0
+		printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+			" tid_len: %d for %s + %s\n",
+			TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+			tpdl, tid_len, i_str, iport_ptr);
+#endif
+		if (tid_len > tpdl) {
+			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+				" %u for Transport ID: %s\n", tid_len, ptr);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		/*
+		 * Locate the destination struct se_dev_entry pointer for matching
+		 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+		 * Target Port.
+		 */
+		dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+					dest_rtpi);
+		if (!(dest_se_deve)) {
+			printk(KERN_ERR "Unable to locate %s dest_se_deve"
+				" from destination RTPI: %hu\n",
+				TPG_TFO(dest_tpg)->get_fabric_name(),
+				dest_rtpi);
+
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+
+		ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+		if (ret < 0) {
+			printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+					" failed\n");
+			atomic_dec(&dest_se_deve->pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			goto out;
+		}
+#if 0
+		printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+			" dest_se_deve mapped_lun: %u\n",
+			TPG_TFO(dest_tpg)->get_fabric_name(),
+			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+#endif
+		/*
+		 * Skip any TransportIDs that already have a registration for
+		 * this target port.
+		 */
+		pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+		if (pr_reg_e) {
+			core_scsi3_put_pr_reg(pr_reg_e);
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ptr += tid_len;
+			tpdl -= tid_len;
+			tid_len = 0;
+			continue;
+		}
+		/*
+		 * Allocate a struct pr_transport_id_holder and setup
+		 * the dest_node_acl and dest_se_deve pointers for the
+		 * loop below.
+		 */
+		tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+				GFP_KERNEL);
+		if (!(tidh_new)) {
+			printk(KERN_ERR "Unable to allocate tidh_new\n");
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			goto out;
+		}
+		INIT_LIST_HEAD(&tidh_new->dest_list);
+		tidh_new->dest_tpg = dest_tpg;
+		tidh_new->dest_node_acl = dest_node_acl;
+		tidh_new->dest_se_deve = dest_se_deve;
+
+		/*
+		 * Allocate, but do NOT add the registration for the
+		 * TransportID referenced SCSI Initiator port.  This is
+		 * done because of the following from spc4r17 in section
+		 * 6.14.3 wrt SPEC_I_PT:
+		 *
+		 * "If a registration fails for any initiator port (e.g., if the
+		 * logical unit does not have enough resources available to
+		 * hold the registration information), no registrations shall be
+		 * made, and the command shall be terminated with
+		 * CHECK CONDITION status."
+		 *
+		 * That means we call __core_scsi3_alloc_registration() here,
+		 * and then call __core_scsi3_add_registration() in the
+		 * 2nd loop which will never fail.
+		 */
+		dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, all_tg_pt, aptpl);
+		if (!(dest_pr_reg)) {
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			kfree(tidh_new);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		tidh_new->dest_pr_reg = dest_pr_reg;
+		list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
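+		/* Advance to the next TransportID in the payload. */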
+		ptr += tid_len;
+		tpdl -= tid_len;
+		tid_len = 0;
+
+	}
+	/*
+	 * Go ahead and create registrations from tid_dest_list for the
+	 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
+	 * and dest_se_deve.
+	 *
+	 * The SA Reservation Key from the PROUT is set for the
+	 * registration, and ALL_TG_PT is also passed.  ALL_TG_PT=1
+	 * means that the TransportID Initiator port will be
+	 * registered on all of the target ports in the SCSI target device;
+	 * ALL_TG_PT=0 means the registration will only be for the
+	 * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1
+	 * was received.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
+						PR_REG_ISID_ID_LEN);
+
+		__core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+					dest_pr_reg, 0, 0);
+
+		printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+			" registered Transport ID for Node: %s%s Mapped LUN:"
+			" %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+			dest_node_acl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", dest_se_deve->mapped_lun);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+
+	return 0;
+out:
+	/*
+	 * For the failure case, release everything from tid_dest_list
+	 * including *dest_pr_reg and the configfs dependencies.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+		/*
+		 * Release any extra ALL_TG_PT=1 registrations for
+		 * the SPEC_I_PT=1 case.
+		 */
+		list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+				&dest_pr_reg->pr_reg_atp_list,
+				pr_reg_atp_mem_list) {
+			list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+			core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+		}
+
+		kfree(dest_pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held
+ */
+static int __core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char tmp[512], isid_buf[32];
+	ssize_t len = 0;
+	int reg_count = 0;
+
+	memset(buf, 0, pr_aptpl_buf_len);
+	/*
+	 * Called to clear metadata once APTPL has been deactivated.
+	 */
+	if (clear_aptpl_metadata) {
+		snprintf(buf, pr_aptpl_buf_len,
+				"No Registrations or Reservations\n");
+		return 0;
+	}
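+	/*
+	 * Each registration is written out as a block of key=value lines
+	 * bracketed by PR_REG_START/PR_REG_END markers (the values below
+	 * are illustrative only):
+	 *
+	 *   PR_REG_START: 0
+	 *   initiator_fabric=iSCSI
+	 *   initiator_node=iqn.1994-05.com.example:initiator
+	 *   sa_res_key=1234
+	 *   res_holder=0
+	 *   res_all_tg_pt=0
+	 *   mapped_lun=0
+	 *   target_fabric=iSCSI
+	 *   target_node=iqn.2003-01.com.example:target
+	 *   tpgt=1
+	 *   port_rtpi=1
+	 *   target_lun=0
+	 *   PR_REG_END: 0
+	 */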
+	/*
+	 * Walk the registration list..
+	 */
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+
+		tmp[0] = '\0';
+		isid_buf[0] = '\0';
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		lun = pr_reg->pr_reg_tg_pt_lun;
+		/*
+		 * Write out any ISID value to APTPL metadata that was included
+		 * in the original registration.
+		 */
+		if (pr_reg->isid_present_at_reg)
+			snprintf(isid_buf, 32, "initiator_sid=%s\n",
+					pr_reg->pr_reg_isid);
+		/*
+		 * Include special metadata if the pr_reg matches the
+		 * reservation holder.
+		 */
+		if (dev->dev_pr_res_holder == pr_reg) {
+			snprintf(tmp, 512, "PR_REG_START: %d"
+				"\ninitiator_fabric=%s\n"
+				"initiator_node=%s\n%s"
+				"sa_res_key=%llu\n"
+				"res_holder=1\nres_type=%02x\n"
+				"res_scope=%02x\nres_all_tg_pt=%d\n"
+				"mapped_lun=%u\n", reg_count,
+				TPG_TFO(tpg)->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_res_type,
+				pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		} else {
+			snprintf(tmp, 512, "PR_REG_START: %d\n"
+				"initiator_fabric=%s\ninitiator_node=%s\n%s"
+				"sa_res_key=%llu\nres_holder=0\n"
+				"res_all_tg_pt=%d\nmapped_lun=%u\n",
+				reg_count, TPG_TFO(tpg)->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		}
+
+		if (len + strlen(tmp) >= pr_aptpl_buf_len) {
+			printk(KERN_ERR "Unable to update remaining"
+				" APTPL metadata\n");
+			spin_unlock(&T10_RES(su_dev)->registration_lock);
+			return -1;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+
+		/*
+		 * Include information about the associated SCSI target port.
+		 */
+		snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
+			"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+			" %d\n", TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+
+		if (len + strlen(tmp) >= pr_aptpl_buf_len) {
+			printk(KERN_ERR "Unable to update remaining"
+				" APTPL metadata\n");
+			spin_unlock(&T10_RES(su_dev)->registration_lock);
+			return -1;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+		reg_count++;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	if (!(reg_count))
+		len += sprintf(buf+len, "No Registrations or Reservations");
+
+	return 0;
+}
+
+static int core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	int ret;
+
+	spin_lock(&dev->dev_reservation_lock);
+	ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->aptpl_file_mutex held
+ */
+static int __core_scsi3_write_aptpl_to_file(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len)
+{
+	struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+	struct file *file;
+	struct iovec iov[1];
+	mm_segment_t old_fs;
+	int flags = O_RDWR | O_CREAT | O_TRUNC;
+	char path[512];
+	int ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+	memset(path, 0, 512);
+
+	if (strlen(&wwn->unit_serial[0]) >=
+	    (512 - sizeof("/var/target/pr/aptpl_"))) {
+		printk(KERN_ERR "WWN value for struct se_device does not fit"
+			" into path buffer\n");
+		return -1;
+	}
+
+	snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+			" failed\n", path);
+		return -1;
+	}
+
+	iov[0].iov_base = &buf[0];
+	if (!(pr_aptpl_buf_len))
+		iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
+	else
+		iov[0].iov_len = pr_aptpl_buf_len;
+
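+	/*
+	 * vfs_writev() expects a user-space iovec; temporarily lift the
+	 * address limit so the kernel buffer at *buf can be written
+	 * directly to the metadata file.
+	 */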
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		printk(KERN_ERR "Error writing APTPL metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -1;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+static int core_scsi3_update_and_write_aptpl(
+	struct se_device *dev,
+	unsigned char *in_buf,
+	u32 in_pr_aptpl_buf_len)
+{
+	unsigned char null_buf[64], *buf;
+	u32 pr_aptpl_buf_len;
+	int ret, clear_aptpl_metadata = 0;
+	/*
+	 * Can be called with a NULL pointer from PROUT service action CLEAR
+	 */
+	if (!(in_buf)) {
+		memset(null_buf, 0, 64);
+		buf = &null_buf[0];
+		/*
+		 * This will clear the APTPL metadata to:
+		 * "No Registrations or Reservations" status
+		 */
+		pr_aptpl_buf_len = 64;
+		clear_aptpl_metadata = 1;
+	} else {
+		buf = in_buf;
+		pr_aptpl_buf_len = in_pr_aptpl_buf_len;
+	}
+
+	ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	if (ret != 0)
+		return -1;
+	/*
+	 * __core_scsi3_write_aptpl_to_file() will call strlen()
+	 * on the passed buf to determine pr_aptpl_buf_len.
+	 */
+	ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
+	if (ret != 0)
+		return -1;
+
+	return ret;
+}
+
+static int core_scsi3_emulate_pro_register(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int all_tg_pt,
+	int spec_i_pt,
+	int ignore_key)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	/* Used for APTPL metadata w/ UNREGISTER */
+	unsigned char *pr_aptpl_buf = NULL;
+	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+	int pr_holder = 0, ret = 0, type;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	se_tpg = se_sess->se_tpg;
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+
+	if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+		memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
+		TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+				PR_REG_ISID_LEN);
+		isid_ptr = &isid_buf[0];
+	}
+	/*
+	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+	 */
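+	/*
+	 * In short: with no existing registration, a non-zero RESERVATION
+	 * KEY conflicts; res_key == 0 with sa_res_key == 0 is a no-op
+	 * returning GOOD; a non-zero sa_res_key creates the new
+	 * registration(s).
+	 */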
+	pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!(pr_reg_e)) {
+		if (res_key) {
+			printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+				" for SA REGISTER, returning CONFLICT\n");
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * Do nothing but return GOOD status.
+		 */
+		if (!(sa_res_key))
+			return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
+		if (!(spec_i_pt)) {
+			/*
+			 * Perform the Service Action REGISTER on the Initiator
+			 * Port Endpoint that the PRO was received from on the
+			 * Logical Unit of the SCSI device server.
+			 */
+			ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+					se_sess->se_node_acl, se_deve, isid_ptr,
+					sa_res_key, all_tg_pt, aptpl,
+					ignore_key, 0);
+			if (ret != 0) {
+				printk(KERN_ERR "Unable to allocate"
+					" struct t10_pr_registration\n");
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			}
+		} else {
+			/*
+			 * Register both the Initiator port that received the
+			 * PROUT SA REGISTER + SPEC_I_PT=1 and each SCSI
+			 * TransportID extracted from the fabric dependent
+			 * parameter list, calling the logic of
+			 * core_scsi3_alloc_registration() for each
+			 * TransportID provided SCSI Initiator Port/Device.
+			 */
+			ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+					isid_ptr, sa_res_key, all_tg_pt, aptpl);
+			if (ret != 0)
+				return ret;
+		}
+		/*
+		 * Nothing left to do for the APTPL=0 case.
+		 */
+		if (!(aptpl)) {
+			pr_tmpl->pr_aptpl_active = 0;
+			core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+			printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+					" REGISTER\n");
+			return 0;
+		}
+		/*
+		 * Locate the newly allocated local I_T Nexus *pr_reg, and
+		 * update the APTPL metadata information using its
+		 * preallocated *pr_reg->pr_aptpl_buf.
+		 */
+		pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+				se_sess->se_node_acl, se_sess);
+
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret)) {
+			pr_tmpl->pr_aptpl_active = 1;
+			printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg);
+		return ret;
+	} else {
+		/*
+		 * Locate the existing *pr_reg via struct se_node_acl pointers
+		 */
+		pr_reg = pr_reg_e;
+		type = pr_reg->pr_res_type;
+
+		if (!(ignore_key)) {
+			if (res_key != pr_reg->pr_res_key) {
+				printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+					" res_key: 0x%016Lx does not match"
+					" existing SA REGISTER res_key:"
+					" 0x%016Lx\n", res_key,
+					pr_reg->pr_res_key);
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			}
+		}
+		if (spec_i_pt) {
+			printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+				" set while sa_res_key=0\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+		/*
+		 * An existing ALL_TG_PT=1 registration being released
+		 * must also set ALL_TG_PT=1 in the incoming PROUT.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+			printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+				" registration exists, but ALL_TG_PT=1 bit not"
+				" present in received PROUT\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_INVALID_CDB_FIELD;
+		}
+		/*
+		 * Allocate APTPL metadata buffer used for UNREGISTER ops
+		 */
+		if (aptpl) {
+			pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+						GFP_KERNEL);
+			if (!(pr_aptpl_buf)) {
+				printk(KERN_ERR "Unable to allocate"
+					" pr_aptpl_buf\n");
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_LU_COMM_FAILURE;
+			}
+		}
+		/*
+		 * sa_res_key=0: Unregister the Reservation Key for the
+		 * registered I_T Nexus.  sa_res_key != 0: Change the
+		 * Reservation Key for the registered I_T Nexus.
+		 */
+		if (!(sa_res_key)) {
+			pr_holder = core_scsi3_check_implict_release(
+					SE_DEV(cmd), pr_reg);
+			if (pr_holder < 0) {
+				kfree(pr_aptpl_buf);
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			}
+
+			spin_lock(&pr_tmpl->registration_lock);
+			/*
+			 * Release all ALL_TG_PT=1 registrations for the
+			 * matching SCSI Initiator Port and matching pr_res_key.
+			 */
+			if (pr_reg->pr_reg_all_tg_pt) {
+				list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					if (!(pr_reg_p->pr_reg_all_tg_pt))
+						continue;
+
+					if (pr_reg_p->pr_res_key != res_key)
+						continue;
+
+					if (pr_reg == pr_reg_p)
+						continue;
+
+					if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+						   pr_reg_p->pr_reg_nacl->initiatorname))
+						continue;
+
+					__core_scsi3_free_registration(dev,
+							pr_reg_p, NULL, 0);
+				}
+			}
+			/*
+			 * Release the calling I_T Nexus registration now..
+			 */
+			__core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+							NULL, 1);
+			/*
+			 * From spc4r17, section 5.7.11.3 Unregistering
+			 *
+			 * If the persistent reservation is a registrants only
+			 * type, the device server shall establish a unit
+			 * attention condition for the initiator port associated
+			 * with every registered I_T nexus except for the I_T
+			 * nexus on which the PERSISTENT RESERVE OUT command was
+			 * received, with the additional sense code set to
+			 * RESERVATIONS RELEASED.
+			 */
+			if (pr_holder &&
+			   ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+			    (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
+				list_for_each_entry(pr_reg_p,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					core_scsi3_ua_allocate(
+						pr_reg_p->pr_reg_nacl,
+						pr_reg_p->pr_res_mapped_lun,
+						0x2A,
+						ASCQ_2AH_RESERVATIONS_RELEASED);
+				}
+			}
+			spin_unlock(&pr_tmpl->registration_lock);
+
+			if (!(aptpl)) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for UNREGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret)) {
+				pr_tmpl->pr_aptpl_active = 1;
+				printk("SPC-3 PR: Set APTPL Bit Activated"
+						" for UNREGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			return ret;
+		} else {
+			/*
+			 * Increment PRgeneration counter for struct se_device
+			 * upon a successful REGISTER, see spc4r17 section 6.3.2
+			 * READ_KEYS service action.
+			 */
+			pr_reg->pr_res_generation = core_scsi3_pr_generation(
+							SE_DEV(cmd));
+			pr_reg->pr_res_key = sa_res_key;
+			printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+				" Key for %s to: 0x%016Lx PRgeneration:"
+				" 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+				(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+				pr_reg->pr_reg_nacl->initiatorname,
+				pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+			if (!(aptpl)) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				core_scsi3_put_pr_reg(pr_reg);
+				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for REGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret)) {
+				pr_tmpl->pr_aptpl_active = 1;
+				printk("SPC-3 PR: Set APTPL Bit Activated"
+						" for REGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			core_scsi3_put_pr_reg(pr_reg);
+		}
+	}
+	return 0;
+}
+
+unsigned char *core_scsi3_pr_dump_type(int type)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		return "Write Exclusive Access";
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		return "Exclusive Access";
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		return "Write Exclusive Access, Registrants Only";
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		return "Exclusive Access, Registrants Only";
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		return "Write Exclusive Access, All Registrants";
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return "Exclusive Access, All Registrants";
+	default:
+		break;
+	}
+
+	return "Unknown SPC-3 PR Type";
+}
+
+static int core_scsi3_pro_reserve(
+	struct se_cmd *cmd,
+	struct se_device *dev,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int ret, prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	se_tpg = se_sess->se_tpg;
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RESERVE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * An application client creates a persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RESERVE service action through
+	 * a registered I_T nexus with the following parameters:
+	 *    a) RESERVATION KEY set to the value of the reservation key that is
+	 * 	 registered with the logical unit for the I_T nexus; and
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * From above:
+	 *  b) TYPE field and SCOPE field set to the persistent reservation
+	 *     being created.
+	 *
+	 * Only one persistent reservation is allowed at a time per logical unit
+	 * and that persistent reservation has a scope of LU_SCOPE.
+	 */
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * See if we have an existing PR reservation holder pointer at
+	 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
+	 * *pr_res_holder.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command from an I_T nexus other than a persistent reservation
+		 * holder (see 5.7.10) that attempts to create a persistent
+		 * reservation when a persistent reservation already exists for
+		 * the logical unit, then the command shall be completed with
+		 * RESERVATION CONFLICT status.
+		 */
+		if (pr_res_holder != pr_reg) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s while reservation already held by"
+				" [%s]: %s, returning RESERVATION_CONFLICT\n",
+				CMD_TFO(cmd)->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If a persistent reservation holder attempts to modify the
+		 * type or scope of an existing persistent reservation, the
+		 * command shall be completed with RESERVATION CONFLICT status.
+		 */
+		if ((pr_res_holder->pr_res_type != type) ||
+		    (pr_res_holder->pr_res_scope != scope)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s trying to change TYPE and/or SCOPE,"
+				" while reservation already held by [%s]: %s,"
+				" returning RESERVATION_CONFLICT\n",
+				CMD_TFO(cmd)->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command with RESERVE service action where the TYPE field and
+		 * the SCOPE field contain the same values as the existing type
+		 * and scope from a persistent reservation holder, it shall not
+		 * make any change to the existing persistent reservation and
+		 * shall complete the command with GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
+	/*
+	 * Otherwise, our *pr_reg becomes the PR reservation holder for said
+	 * TYPE/SCOPE.  Also set the received scope and type in *pr_reg.
+	 */
+	pr_reg->pr_res_scope = scope;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_holder = 1;
+	dev->dev_pr_res_holder = pr_reg;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+			CMD_TFO(cmd)->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			(prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+					" for RESERVE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_reserve(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
+		break;
+	default:
+		printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+			" 0x%02x\n", type);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_release(
+	struct se_device *dev,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int explicit)
+{
+	struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Go ahead and release the current PR reservation holder.
+	 */
+	dev->dev_pr_res_holder = NULL;
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+		tfo->get_fabric_name(), se_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	/*
+	 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
+	 */
+	pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
+}
+
+static int core_scsi3_emulate_pro_release(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	int ret, all_reg = 0;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RELEASE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * If there is no persistent reservation or in response to a persistent
+	 * reservation release request from a registered I_T nexus that is not a
+	 * persistent reservation holder (see 5.7.10), the device server shall
+	 * do the following:
+	 *
+	 *     a) Not release the persistent reservation, if any;
+	 *     b) Not remove any registrations; and
+	 *     c) Complete the command with GOOD status.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		/*
+		 * No persistent reservation, return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
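+	/*
+	 * For All Registrants type reservations every registered I_T nexus
+	 * is a reservation holder, so the holder identity check below only
+	 * applies to the other reservation types.
+	 */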
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+		all_reg = 1;
+
+	if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+		/*
+		 * Non 'All Registrants' PR Type case: a release request
+		 * from a registered I_T nexus that is not the persistent
+		 * reservation holder returns GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * Only the persistent reservation holder (see 5.7.10) is allowed to
+	 * release a persistent reservation.
+	 *
+	 * An application client releases the persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RELEASE service action through
+	 * an I_T nexus that is a persistent reservation holder with the
+	 * following parameters:
+	 *
+	 *     a) RESERVATION KEY field set to the value of the reservation key
+	 *	  that is registered with the logical unit for the I_T nexus;
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing and above:
+	 *
+	 * b) TYPE field and SCOPE field set to match the persistent
+	 *    reservation being released.
+	 */
+	if ((pr_res_holder->pr_res_type != type) ||
+	    (pr_res_holder->pr_res_scope != scope)) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+			" reservation from [%s]: %s with different TYPE "
+			"and/or SCOPE while reservation already held by"
+			" [%s]: %s, returning RESERVATION_CONFLICT\n",
+			CMD_TFO(cmd)->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+			pr_res_holder->pr_reg_nacl->initiatorname);
+
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * In response to a persistent reservation release request from the
+	 * persistent reservation holder the device server shall perform a
+	 * release by doing the following as an uninterrupted series of actions:
+	 * a) Release the persistent reservation;
+	 * b) Not remove any registration(s);
+	 * c) If the released persistent reservation is a registrants only
+	 *    type or all registrants type persistent reservation, the
+	 *    device server shall establish a unit attention condition for
+	 *    the initiator port associated with every registered I_T nexus
+	 *    other than the I_T nexus on which the PERSISTENT
+	 *    RESERVE OUT command with RELEASE service action was received,
+	 *    with the additional sense code set to RESERVATIONS RELEASED; and
+	 * d) If the persistent reservation is of any other type, the device
+	 *    server shall not establish a unit attention condition.
+	 */
+	__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+			pr_reg, 1);
+
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+	    (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		/*
+		 * No UNIT ATTENTION conditions are established for
+		 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS,
+		 * so go ahead and check for the APTPL=1 update+write below.
+		 */
+		goto write_aptpl;
+	}
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+			pr_reg_list) {
+		/*
+		 * Do not establish a UNIT ATTENTION condition
+		 * for the calling I_T Nexus
+		 */
+		if (pr_reg_p == pr_reg)
+			continue;
+
+		core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+				pr_reg_p->pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_clear(
+	struct se_cmd *cmd,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	u32 pr_res_mapped_lun = 0;
+	int calling_it_nexus = 0;
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+			se_sess->se_node_acl, se_sess);
+	if (!(pr_reg_n)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for CLEAR\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 section 5.7.11.6, Clearing:
+	 *
+	 * Any application client may release the persistent reservation and
+	 * remove all registrations from a device server by issuing a
+	 * PERSISTENT RESERVE OUT command with CLEAR service action through a
+	 * registered I_T nexus with the following parameter:
+	 *
+	 *	a) RESERVATION KEY field set to the value of the reservation key
+	 * 	   that is registered with the logical unit for the I_T nexus.
+	 */
+	if (res_key != pr_reg_n->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+			" res_key: 0x%016Lx does not match"
+			" existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * a) Release the persistent reservation, if any;
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * b) Remove all registration(s) (see spc4r17 5.7.7);
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg, NULL,
+					calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every registered I_T nexus other
+		 *    than the I_T nexus on which the PERSISTENT RESERVE OUT
+		 *    command with CLEAR service action was received, with the
+		 *    additional sense code set to RESERVATIONS PREEMPTED.
+		 */
+		if (!(calling_it_nexus))
+			core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
+		CMD_TFO(cmd)->get_fabric_name());
+
+	if (pr_tmpl->pr_aptpl_active) {
+		core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+		printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+				" for CLEAR\n");
+	}
+
+	core_scsi3_pr_generation(dev);
+	return 0;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_preempt(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int type,
+	int scope,
+	int abort)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Do an implicit RELEASE of the existing reservation.
+	 */
+	if (dev->dev_pr_res_holder)
+		__core_scsi3_complete_pro_release(dev, nacl,
+				dev->dev_pr_res_holder, 0);
+
+	dev->dev_pr_res_holder = pr_reg;
+	pr_reg->pr_res_holder = 1;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_scope = scope;
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	/*
+	 * For PREEMPT_AND_ABORT, add the preempting reservation's
+	 * struct t10_pr_registration to the list that will be compared
+	 * against received CDBs..
+	 */
+	if (preempt_and_abort_list)
+		list_add_tail(&pr_reg->pr_reg_abort_list,
+				preempt_and_abort_list);
+}
+
+static void core_scsi3_release_preempt_and_abort(
+	struct list_head *preempt_and_abort_list,
+	struct t10_pr_registration *pr_reg_holder)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+
+		list_del(&pr_reg->pr_reg_abort_list);
+		if (pr_reg_holder == pr_reg)
+			continue;
+		if (pr_reg->pr_res_holder) {
+			printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+			continue;
+		}
+
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+}
+
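+/*
+ * Returns 0 when the command's reservation key matches a registration on
+ * the PREEMPT_AND_ABORT list (i.e. its originating I_T nexus was
+ * preempted), and 1 otherwise.
+ */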
+int core_scsi3_check_cdb_abort_and_preempt(
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *cmd)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+		if (pr_reg->pr_res_key == cmd->pr_res_key)
+			return 0;
+	}
+
+	return 1;
+}
+
+static int core_scsi3_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct list_head preempt_and_abort_list;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	u32 pr_res_mapped_lun = 0;
+	int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+	int prh_type = 0, prh_scope = 0, ret;
+
+	if (!(se_sess))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg_n)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for PREEMPT%s\n",
+			(abort) ? "_AND_ABORT" : "");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	if (pr_reg_n->pr_res_key != res_key) {
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	INIT_LIST_HEAD(&preempt_and_abort_list);
+
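+	/*
+	 * Determine if the current reservation holder is of the All
+	 * Registrants type, which relaxes the SA Reservation Key matching
+	 * rules used below.
+	 */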
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder &&
+	   ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
+		all_reg = 1;
+
+	if (!(all_reg) && !(sa_res_key)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
+	 *
+	 * If the SERVICE ACTION RESERVATION KEY field does not identify a
+	 * persistent reservation holder or there is no persistent reservation
+	 * holder (i.e., there is no persistent reservation), then the device
+	 * server shall perform a preempt by doing the following in an
+	 * uninterrupted series of actions. (See below..)
+	 */
+	if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+		/*
+		 * Either no reservation exists, or the SA Reservation Key
+		 * does not match the existing reservation holder.
+		 *
+		 * PROUT SA PREEMPT with All Registrant type reservations are
+		 * allowed to be processed without a matching SA Reservation Key
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+			/*
+			 * Removal of registrations in non all registrants
+			 * type reservations without a matching SA reservation
+			 * key:
+			 *
+			 * a) Remove the registrations for all I_T nexuses
+			 *    specified by the SERVICE ACTION RESERVATION KEY
+			 *    field;
+			 * b) Ignore the contents of the SCOPE and TYPE fields;
+			 * c) Process tasks as defined in 5.7.1; and
+			 * d) Establish a unit attention condition for the
+			 *    initiator port associated with every I_T nexus
+			 *    that lost its registration other than the I_T
+			 *    nexus on which the PERSISTENT RESERVE OUT command
+			 *    was received, with the additional sense code set
+			 *    to REGISTRATIONS PREEMPTED.
+			 */
+			if (!(all_reg)) {
+				if (pr_reg->pr_res_key != sa_res_key)
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, calling_it_nexus);
+				released_regs++;
+			} else {
+				/*
+				 * Case for any existing all registrants type
+				 * reservation, follow logic in spc4r17 section
+				 * 5.7.11.4 Preempting, Table 52 and Figure 7.
+				 *
+				 * For a ZERO SA Reservation key, release
+				 * all other registrations and do an implicit
+				 * release of active persistent reservation.
+				 *
+				 * For a non-ZERO SA Reservation key, only
+				 * release the matching reservation key from
+				 * registrations.
+				 */
+				if ((sa_res_key) &&
+				     (pr_reg->pr_res_key != sa_res_key))
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				if (calling_it_nexus)
+					continue;
+
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, 0);
+				released_regs++;
+			}
+			if (!(calling_it_nexus))
+				core_scsi3_ua_allocate(pr_reg_nacl,
+					pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_PREEMPTED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+		 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+		 * RESERVATION KEY field to a value that does not match any
+		 * registered reservation key, then the device server shall
+		 * complete the command with RESERVATION CONFLICT status.
+		 */
+		if (!(released_regs)) {
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg_n);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * For an existing all registrants type reservation
+		 * with a zero SA reservation key, preempt the existing
+		 * reservation with the new PR type and scope.
+		 */
+		if (pr_res_holder && all_reg && !(sa_res_key)) {
+			__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+				(abort) ? &preempt_and_abort_list : NULL,
+				type, scope, abort);
+
+			if (abort)
+				core_scsi3_release_preempt_and_abort(
+					&preempt_and_abort_list, pr_reg_n);
+		}
+		spin_unlock(&dev->dev_reservation_lock);
+
+		if (pr_tmpl->pr_aptpl_active) {
+			ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+					&pr_reg_n->pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret))
+				printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+					" metadata for PREEMPT%s\n", (abort) ?
+					"_AND_ABORT" : "");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg_n);
+		core_scsi3_pr_generation(SE_DEV(cmd));
+		return 0;
+	}
+	/*
+	 * The PREEMPTing SA reservation key matches that of the
+	 * existing persistent reservation; first, we check if
+	 * we are preempting our own reservation.
+	 * From spc4r17, section 5.7.11.4.3 Preempting
+	 * persistent reservations and registration handling
+	 *
+	 * If an all registrants persistent reservation is not
+	 * present, it is not an error for the persistent
+	 * reservation holder to preempt itself (i.e., a
+	 * PERSISTENT RESERVE OUT with a PREEMPT service action
+	 * or a PREEMPT AND ABORT service action with the
+	 * SERVICE ACTION RESERVATION KEY value equal to the
+	 * persistent reservation holder's reservation key that
+	 * is received from the persistent reservation holder).
+	 * In that case, the device server shall establish the
+	 * new persistent reservation and maintain the
+	 * registration.
+	 */
+	prh_type = pr_res_holder->pr_res_type;
+	prh_scope = pr_res_holder->pr_res_scope;
+	/*
+	 * If the SERVICE ACTION RESERVATION KEY field identifies a
+	 * persistent reservation holder (see 5.7.10), the device
+	 * server shall perform a preempt by doing the following as
+	 * an uninterrupted series of actions:
+	 *
+	 * a) Release the persistent reservation for the holder
+	 *    identified by the SERVICE ACTION RESERVATION KEY field;
+	 */
+	if (pr_reg_n != pr_res_holder)
+		__core_scsi3_complete_pro_release(dev,
+				pr_res_holder->pr_reg_nacl,
+				dev->dev_pr_res_holder, 0);
+	/*
+	 * b) Remove the registrations for all I_T nexuses identified
+	 *    by the SERVICE ACTION RESERVATION KEY field, except the
+	 *    I_T nexus that is being used for the PERSISTENT RESERVE
+	 *    OUT command. If an all registrants persistent reservation
+	 *    is present and the SERVICE ACTION RESERVATION KEY field
+	 *    is set to zero, then all registrations shall be removed
+	 *    except for that of the I_T nexus that is being used for
+	 *    the PERSISTENT RESERVE OUT command;
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		if (calling_it_nexus)
+			continue;
+
+		if (pr_reg->pr_res_key != sa_res_key)
+			continue;
+
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg,
+				(abort) ? &preempt_and_abort_list : NULL,
+				calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every I_T nexus that lost its
+		 *    persistent reservation and/or registration, with the
+		 *    additional sense code set to REGISTRATIONS PREEMPTED;
+		 */
+		core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+				ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * c) Establish a persistent reservation for the preempting
+	 *    I_T nexus using the contents of the SCOPE and TYPE fields;
+	 */
+	__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+			(abort) ? &preempt_and_abort_list : NULL,
+			type, scope, abort);
+	/*
+	 * d) Process tasks as defined in 5.7.1;
+	 * e) See above.
+	 * f) If the type or scope has changed, then for every I_T nexus
+	 *    whose reservation key was not removed, except for the I_T
+	 *    nexus on which the PERSISTENT RESERVE OUT command was
+	 *    received, the device server shall establish a unit
+	 *    attention condition for the initiator port associated with
+	 *    that I_T nexus, with the additional sense code set to
+	 *    RESERVATIONS RELEASED. If the type or scope have not
+	 *    changed, then no unit attention condition(s) shall be
+	 *    established for this reason.
+	 */
+	if ((prh_type != type) || (prh_scope != scope)) {
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+
+			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+			if (calling_it_nexus)
+				continue;
+
+			core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+					pr_reg->pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_RELEASED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Call LUN_RESET logic upon list of struct t10_pr_registration,
+	 * All received CDBs for the matching existing reservation and
+	 * registrations undergo ABORT_TASK logic.
+	 *
+	 * From there, core_scsi3_release_preempt_and_abort() will
+	 * release every registration in the list (which have already
+	 * been removed from the primary pr_reg list), except the
+	 * new persistent reservation holder, the calling Initiator Port.
+	 */
+	if (abort) {
+		core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+		core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+						pr_reg_n);
+	}
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg_n->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+				"%s\n", (abort) ? "_AND_ABORT" : "");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg_n);
+	core_scsi3_pr_generation(SE_DEV(cmd));
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_preempt(cmd, type, scope,
+				res_key, sa_res_key, abort);
+		break;
+	default:
+		printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+			" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return ret;
+}
+
+static int core_scsi3_emulate_pro_register_and_move(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int unreg)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve, *dest_se_deve = NULL;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
+	struct se_port *se_port;
+	struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
+	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *initiator_str;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tid_len, tmp_tid_len;
+	int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+	unsigned short rtpi;
+	unsigned char proto_ident;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	memset(dest_iport, 0, 64);
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	se_tpg = se_sess->se_tpg;
+	tf_ops = TPG_TFO(se_tpg);
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
+	 *	Register behaviors for a REGISTER AND MOVE service action
+	 *
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+			" *pr_reg for REGISTER_AND_MOVE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * The provided reservation key must match the existing reservation key
+	 * provided during this initiator's I_T nexus registration.
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+			" res_key: 0x%016Lx does not match existing SA REGISTER"
+			" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * The service action reservation key needs to be non-zero
+	 */
+	if (!(sa_res_key)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+			" sa_res_key\n");
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * Determine the Relative Target Port Identifier where the reservation
+	 * will be moved to for the TransportID containing SCSI initiator WWN
+	 * information.
+	 */
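+	/*
+	 * From the spc4r17 REGISTER AND MOVE parameter list (see 6.14.4):
+	 * bytes 18-19 hold the big-endian RELATIVE TARGET PORT IDENTIFIER
+	 * and bytes 20-23 the big-endian TRANSPORTID PARAMETER DATA LENGTH.
+	 */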
+	rtpi = (buf[18] & 0xff) << 8;
+	rtpi |= buf[19] & 0xff;
+	tid_len = (buf[20] & 0xff) << 24;
+	tid_len |= (buf[21] & 0xff) << 16;
+	tid_len |= (buf[22] & 0xff) << 8;
+	tid_len |= buf[23] & 0xff;
+
+	if ((tid_len + 24) != cmd->data_length) {
+		printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+			" does not equal CDB data_length: %u\n", tid_len,
+			cmd->data_length);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
+		if (se_port->sep_rtpi != rtpi)
+			continue;
+		dest_se_tpg = se_port->sep_tpg;
+		if (!(dest_se_tpg))
+			continue;
+		dest_tf_ops = TPG_TFO(dest_se_tpg);
+		if (!(dest_tf_ops))
+			continue;
+
+		atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		ret = core_scsi3_tpg_depend_item(dest_se_tpg);
+		if (ret != 0) {
+			printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+				" for dest_se_tpg\n");
+			atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_LU_COMM_FAILURE;
+		}
+
+		spin_lock(&dev->se_port_lock);
+		break;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	if (!(dest_se_tpg) || !(dest_tf_ops)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" fabric ops from Relative Target Port Identifier:"
+			" %hu\n", rtpi);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	proto_ident = (buf[24] & 0x0f);
+#if 0
+	printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+			" 0x%02x\n", proto_ident);
+#endif
+	if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+			" proto_ident: 0x%02x does not match ident: 0x%02x"
+			" from fabric: %s\n", proto_ident,
+			dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+			dest_tf_ops->get_fabric_name());
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+			" containg a valid tpg_parse_pr_out_transport_id"
+			" function pointer\n");
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+	initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+			(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+	if (!(initiator_str)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" initiator_str from Transport ID\n");
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+		" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+		"port" : "device", initiator_str, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action specifies a TransportID that is the same as the initiator port
+	 * of the I_T nexus for the command received, then the command shall
+	 * be terminated with CHECK CONDITION status, with the sense key set to
+	 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
+	 * IN PARAMETER LIST.
+	 */
+	pr_reg_nacl = pr_reg->pr_reg_nacl;
+	matching_iname = (!strcmp(initiator_str,
+				  pr_reg_nacl->initiatorname)) ? 1 : 0;
+	if (!(matching_iname))
+		goto after_iport_check;
+
+	if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+			" matches: %s on received I_T Nexus\n", initiator_str,
+			pr_reg_nacl->initiatorname);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+			" matches: %s %s on received I_T Nexus\n",
+			initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+			pr_reg->pr_reg_isid);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+after_iport_check:
+	/*
+	 * Locate the destination struct se_node_acl from the received Transport ID
+	 */
+	spin_lock_bh(&dest_se_tpg->acl_node_lock);
+	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+				initiator_str);
+	if (dest_node_acl) {
+		atomic_inc(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_inc();
+	}
+	spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+
+	if (!(dest_node_acl)) {
+		printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+			" TransportID%s\n", dest_tf_ops->get_fabric_name(),
+			initiator_str);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+	if (ret != 0) {
+		printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
+			" dest_node_acl\n");
+		atomic_dec(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_node_acl = NULL;
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+#if 0
+	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+		" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname);
+#endif
+	/*
+	 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+	 * PORT IDENTIFIER.
+	 */
+	dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+	if (!(dest_se_deve)) {
+		printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+			" %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+	if (ret < 0) {
+		printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+		atomic_dec(&dest_se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_se_deve = NULL;
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+#if 0
+	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+		" ACL for dest_se_deve->mapped_lun: %u\n",
+		dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+		dest_se_deve->mapped_lun);
+#endif
+	/*
+	 * A persistent reservation needs to already exist in order to
+	 * successfully complete the REGISTER_AND_MOVE service action.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+			" currently held\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+		goto out;
+	}
+	/*
+	 * The I_T nexus on which the command was received must be the
+	 * reservation holder.
+	 *
+	 * From spc4r17 section 5.7.8 Table 50 --
+	 * 	Register behaviors for a REGISTER AND MOVE service action
+	 */
+	if (pr_res_holder != pr_reg) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+			" Nexus is not reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		goto out;
+	}
+	/*
+	 * From spc4r17 section 5.7.8: registering and moving reservation
+	 *
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action is received and the established persistent reservation is a
+	 * Write Exclusive - All Registrants type or Exclusive Access -
+	 * All Registrants type reservation, then the command shall be completed
+	 * with RESERVATION CONFLICT status.
+	 */
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+			" reservation for type: %s\n",
+			core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		goto out;
+	}
+	pr_res_nacl = pr_res_holder->pr_reg_nacl;
+	/*
+	 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+	 */
+	type = pr_res_holder->pr_res_type;
+	scope = pr_res_holder->pr_res_scope;
+	/*
+	 * c) Associate the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field with the I_T nexus specified as the
+	 *    destination of the register and move, where:
+	 *    A) The I_T nexus is specified by the TransportID and the
+	 *	 RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+	 *    B) Regardless of the TransportID format used, the association for
+	 *       the initiator port is based on either the initiator port name
+	 *       (see 3.1.71) on SCSI transport protocols where port names are
+	 *       required or the initiator port identifier (see 3.1.70) on SCSI
+	 *       transport protocols where port names are not required;
+	 * d) Register the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field;
+	 * e) Retain the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field and associated information;
+	 *
+	 * Also, it is not an error for a REGISTER AND MOVE service action to
+	 * register an I_T nexus that is already registered with the same
+	 * reservation key or a different reservation key.
+	 */
+	dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+	if (!(dest_pr_reg)) {
+		ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, 0, aptpl, 2, 1);
+		if (ret != 0) {
+			spin_unlock(&dev->dev_reservation_lock);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+						iport_ptr);
+		new_reg = 1;
+	}
+	/*
+	 * f) Release the persistent reservation for the persistent reservation
+	 *    holder (i.e., the I_T nexus on which the PERSISTENT
+	 *    RESERVE OUT command was received);
+	 */
+	__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			dev->dev_pr_res_holder, 0);
+	/*
+	 * g) Move the persistent reservation to the specified I_T nexus using
+	 *    the same scope and type as the persistent reservation released in
+	 *    item f); and
+	 */
+	dev->dev_pr_res_holder = dest_pr_reg;
+	dest_pr_reg->pr_res_holder = 1;
+	dest_pr_reg->pr_res_type = type;
+	dest_pr_reg->pr_res_scope = scope;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Increment PRGeneration for existing registrations.
+	 */
+	if (!(new_reg))
+		dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+		" created new reservation holder TYPE: %s on object RTPI:"
+		" %hu  PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+		core_scsi3_pr_dump_type(type), rtpi,
+		dest_pr_reg->pr_res_generation);
+	printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+		" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+		tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * It is now safe to release configfs group dependencies for destination
+	 * of Transport ID Initiator Device/Port Identifier
+	 */
+	core_scsi3_lunacl_undepend_item(dest_se_deve);
+	core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	/*
+	 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+	 * nexus on which the PERSISTENT RESERVE OUT command was received.
+	 */
+	if (unreg) {
+		spin_lock(&pr_tmpl->registration_lock);
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+		spin_unlock(&pr_tmpl->registration_lock);
+	} else
+		core_scsi3_put_pr_reg(pr_reg);
+
+	/*
+	 * Clear the APTPL metadata if APTPL has been disabled, otherwise
+	 * write out the updated metadata to struct file for this SCSI device.
+	 */
+	if (!(aptpl)) {
+		pr_tmpl->pr_aptpl_active = 0;
+		core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+		printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+				" REGISTER_AND_MOVE\n");
+	} else {
+		pr_tmpl->pr_aptpl_active = 1;
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&dest_pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Set APTPL Bit Activated for"
+					" REGISTER_AND_MOVE\n");
+	}
+
+	core_scsi3_put_pr_reg(dest_pr_reg);
+	return 0;
+out:
+	if (dest_se_deve)
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+	if (dest_node_acl)
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
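+/*
+ * Assemble the 64-bit RESERVATION KEY from eight big-endian bytes of the
+ * PR OUT parameter list: cdb[0] supplies bits 63:56, cdb[7] bits 7:0.
+ */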
+static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
+	__v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
+{
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u64 res_key, sa_res_key;
+	int sa, scope, type, aptpl;
+	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+	/*
+	 * FIXME: A NULL struct se_session pointer means this is not coming from
+	 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+	 */
+	if (!(SE_SESS(cmd)))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	if (cmd->data_length < 24) {
+		printk(KERN_WARNING "SPC-PR: Recieved PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+	 */
+	sa = (cdb[1] & 0x1f);
+	scope = (cdb[2] & 0xf0);
+	type = (cdb[2] & 0x0f);
+	/*
+	 * From PERSISTENT_RESERVE_OUT parameter list (payload)
+	 */
+	res_key = core_scsi3_extract_reservation_key(&buf[0]);
+	sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+	/*
+	 * REGISTER_AND_MOVE uses a different SA parameter list containing
+	 * SCSI TransportIDs.
+	 */
+	if (sa != PRO_REGISTER_AND_MOVE) {
+		spec_i_pt = (buf[20] & 0x08);
+		all_tg_pt = (buf[20] & 0x04);
+		aptpl = (buf[20] & 0x01);
+	} else {
+		aptpl = (buf[17] & 0x01);
+		unreg = (buf[17] & 0x02);
+	}
+	/*
+	 * SPEC_I_PT=1 is only valid for Service action: REGISTER
+	 */
+	if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	/*
+	 * From spc4r17 section 6.14:
+	 *
+	 * If the SPEC_I_PT bit is set to zero, the service action is not
+	 * REGISTER AND MOVE, and the parameter list length is not 24, then
+	 * the command shall be terminated with CHECK CONDITION status, with
+	 * the sense key set to ILLEGAL REQUEST, and the additional sense
+	 * code set to PARAMETER LIST LENGTH ERROR.
+	 */
+	if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+	    (cmd->data_length != 24)) {
+		printk(KERN_WARNING "SPC-PR: Recieved PR OUT illegal parameter"
+			" list length: %u\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * core_scsi3_emulate_pro_* function parameters
+	 * are defined by spc4r17 Table 174:
+	 * PERSISTENT_RESERVE_OUT service actions and valid parameters.
+	 */
+	switch (sa) {
+	case PRO_REGISTER:
+		return core_scsi3_emulate_pro_register(cmd,
+			res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+	case PRO_RESERVE:
+		return core_scsi3_emulate_pro_reserve(cmd,
+			type, scope, res_key);
+	case PRO_RELEASE:
+		return core_scsi3_emulate_pro_release(cmd,
+			type, scope, res_key);
+	case PRO_CLEAR:
+		return core_scsi3_emulate_pro_clear(cmd, res_key);
+	case PRO_PREEMPT:
+		return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 0);
+	case PRO_PREEMPT_AND_ABORT:
+		return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 1);
+	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+		return core_scsi3_emulate_pro_register(cmd,
+			0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+	case PRO_REGISTER_AND_MOVE:
+		return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+				sa_res_key, aptpl, unreg);
+	default:
+		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 add_len = 0, off = 8;
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
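+	/*
+	 * Bytes 0-3 of the returned payload carry the big-endian
+	 * PRGENERATION counter, followed at byte 8 by the list of
+	 * registered reservation keys.
+	 */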
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+		/*
+		 * Check for overflow of 8byte PRI READ_KEYS payload and
+		 * next reservation key list descriptor.
+		 */
+		if ((add_len + 8) > (cmd->data_length - 8))
+			break;
+
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+
+		add_len += 8;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u64 pr_res_key;
+	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&se_dev->dev_reservation_lock);
+	pr_reg = se_dev->dev_pr_res_holder;
+	if ((pr_reg)) {
+		/*
+		 * Set the hardcoded Additional Length
+		 */
+		buf[4] = ((add_len >> 24) & 0xff);
+		buf[5] = ((add_len >> 16) & 0xff);
+		buf[6] = ((add_len >> 8) & 0xff);
+		buf[7] = (add_len & 0xff);
+
+		if (cmd->data_length < 22) {
+			spin_unlock(&se_dev->dev_reservation_lock);
+			return 0;
+		}
+		/*
+		 * Set the Reservation key.
+		 *
+		 * From spc4r17, section 5.7.10:
+		 * A persistent reservation holder has its reservation key
+		 * returned in the parameter data from a PERSISTENT
+		 * RESERVE IN command with READ RESERVATION service action as
+		 * follows:
+		 * a) For a persistent reservation of the type Write Exclusive
+		 *    - All Registrants or Exclusive Access - All Registrants,
+		 *      the reservation key shall be set to zero; or
+		 * b) For all other persistent reservation types, the
+		 *    reservation key shall be set to the registered
+		 *    reservation key for the I_T nexus that holds the
+		 *    persistent reservation.
+		 */
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+			pr_res_key = 0;
+		else
+			pr_res_key = pr_reg->pr_res_key;
+
+		buf[8] = ((pr_res_key >> 56) & 0xff);
+		buf[9] = ((pr_res_key >> 48) & 0xff);
+		buf[10] = ((pr_res_key >> 40) & 0xff);
+		buf[11] = ((pr_res_key >> 32) & 0xff);
+		buf[12] = ((pr_res_key >> 24) & 0xff);
+		buf[13] = ((pr_res_key >> 16) & 0xff);
+		buf[14] = ((pr_res_key >> 8) & 0xff);
+		buf[15] = (pr_res_key & 0xff);
+		/*
+		 * Set the SCOPE and TYPE
+		 */
+		buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+			  (pr_reg->pr_res_type & 0x0f);
+	}
+	spin_unlock(&se_dev->dev_reservation_lock);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u16 add_len = 8; /* Hardcoded to 8. */
+
+	if (cmd->data_length < 6) {
+		printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+			" %u too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((add_len >> 8) & 0xff);
+	buf[1] = (add_len & 0xff);
+	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+	buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+	buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+	buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+	/*
+	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+	 * set the TMV: Task Mask Valid bit.
+	 */
+	buf[3] |= 0x80;
+	/*
+	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+	 */
+	buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+	/*
+	 * PTPL_A: Persistence across Target Power Loss Active bit
+	 */
+	if (pr_tmpl->pr_aptpl_active)
+		buf[3] |= 0x01;
+	/*
+	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+	 */
+	buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+	buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+	buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+	buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+	buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+	buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_node_acl *se_nacl;
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+	u32 off = 8; /* offset into first Full Status descriptor */
+	int format_code = 0;
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		se_nacl = pr_reg->pr_reg_nacl;
+		se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+		add_desc_len = 0;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Determine expected length of $FABRIC_MOD specific
+		 * TransportID full status descriptor.
+		 */
+		exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+				se_tpg, se_nacl, pr_reg, &format_code);
+
+		if ((exp_desc_len + add_len) > cmd->data_length) {
+			printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+				" out of buffer: %d\n", cmd->data_length);
+			spin_lock(&pr_tmpl->registration_lock);
+			atomic_dec(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_dec();
+			break;
+		}
+		/*
+		 * Set RESERVATION KEY
+		 */
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+		off += 4; /* Skip Over Reserved area */
+
+		/*
+		 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt)
+			buf[off] = 0x02;
+		/*
+		 * The struct se_lun pointer will be present for the
+		 * reservation holder for PR_HOLDER bit.
+		 *
+		 * Also, if this registration is the reservation
+		 * holder, fill in SCOPE and TYPE in the next byte.
+		 */
+		if (pr_reg->pr_res_holder) {
+			buf[off++] |= 0x01;
+			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+				     (pr_reg->pr_res_type & 0x0f);
+		} else
+			off += 2;
+
+		off += 4; /* Skip over reserved area */
+		/*
+		 * From spc4r17 section 6.13.5:
+		 *
+		 * If the ALL_TG_PT bit set to zero, the RELATIVE TARGET PORT
+		 * IDENTIFIER field contains the relative port identifier (see
+		 * 3.1.120) of the target port that is part of the I_T nexus
+		 * described by this full status descriptor. If the ALL_TG_PT
+		 * bit is set to one, the contents of the RELATIVE TARGET PORT
+		 * IDENTIFIER field are not defined by this standard.
+		 */
+		if (!(pr_reg->pr_reg_all_tg_pt)) {
+			struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+		} else
+			off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
+
+		/*
+		 * Now, have the $FABRIC_MOD fill in the protocol identifier
+		 */
+		desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+				se_nacl, pr_reg, &format_code, &buf[off+4]);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		atomic_dec(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_dec();
+		/*
+		 * Set the ADDITIONAL DESCRIPTOR LENGTH
+		 */
+		buf[off++] = ((desc_len >> 24) & 0xff);
+		buf[off++] = ((desc_len >> 16) & 0xff);
+		buf[off++] = ((desc_len >> 8) & 0xff);
+		buf[off++] = (desc_len & 0xff);
+		/*
+		 * Size of the full status descriptor: the 24 byte header
+		 * plus the TransportID containing $FABRIC_MOD specific
+		 * initiator device/port WWN information.
+		 *
+		 *  See spc4r17 Section 6.13.5 Table 169
+		 */
+		add_desc_len = (24 + desc_len);
+
+		off += desc_len;
+		add_len += add_desc_len;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Set ADDITIONAL_LENGTH
+	 */
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	return 0;
+}
+
+static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
+{
+	switch (cdb[1] & 0x1f) {
+	case PRI_READ_KEYS:
+		return core_scsi3_pri_read_keys(cmd);
+	case PRI_READ_RESERVATION:
+		return core_scsi3_pri_read_reservation(cmd);
+	case PRI_REPORT_CAPABILITIES:
+		return core_scsi3_pri_report_capabilities(cmd);
+	case PRI_READ_FULL_STATUS:
+		return core_scsi3_pri_read_full_status(cmd);
+	default:
+		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+}
+
+int core_scsi3_emulate_pr(struct se_cmd *cmd)
+{
+	unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+	struct se_device *dev = cmd->se_dev;
+	/*
+	 * Following spc2r20 5.5.1 Reservations overview:
+	 *
+	 * If a logical unit has been reserved by any RESERVE command and is
+	 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+	 * PERSISTENT RESERVE OUT commands shall conflict regardless of
+	 * initiator or service action and shall terminate with a RESERVATION
+	 * CONFLICT status.
+	 */
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
+		printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+			" SPC-2 reservation is held, returning"
+			" RESERVATION_CONFLICT\n");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+	return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
+	       core_scsi3_emulate_pr_out(cmd, cdb) :
+	       core_scsi3_emulate_pr_in(cmd, cdb);
+}
+
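+/*
+ * Passthrough stubs used for SPC_PASSTHROUGH below: reservation checking
+ * is left entirely to the underlying SCSI hardware, so both ops always
+ * report success.
+ */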
+static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
+{
+	return 0;
+}
+
+static int core_pt_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	return 0;
+}
+
+int core_setup_reservations(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_reservation_template *rest = &su_dev->t10_reservation;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the reservations
+	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but need reservations to be emulated for them.
+	 */
+	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+		rest->res_type = SPC_PASSTHROUGH;
+		rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
+			" emulation\n", TRANSPORT(dev)->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated Persistent Reservations.
+	 */
+	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+		rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
+			" emulation\n", TRANSPORT(dev)->name);
+	} else {
+		rest->res_type = SPC2_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
+		rest->pr_ops.t10_seq_non_holder =
+				&core_scsi2_reservation_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
+			TRANSPORT(dev)->name);
+	}
+
+	return 0;
+}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 0000000..5603bcf
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,67 @@
+#ifndef TARGET_CORE_PR_H
+#define TARGET_CORE_PR_H
+/*
+ * PERSISTENT_RESERVE_OUT service action codes
+ *
+ * spc4r17 section 6.14.2 Table 171
+ */
+#define PRO_REGISTER				0x00
+#define PRO_RESERVE				0x01
+#define PRO_RELEASE				0x02
+#define PRO_CLEAR				0x03
+#define PRO_PREEMPT				0x04
+#define PRO_PREEMPT_AND_ABORT			0x05
+#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY	0x06
+#define PRO_REGISTER_AND_MOVE			0x07
+/*
+ * PERSISTENT_RESERVE_IN service action codes
+ *
+ * spc4r17 section 6.13.1 Table 159
+ */
+#define PRI_READ_KEYS				0x00
+#define PRI_READ_RESERVATION			0x01
+#define PRI_REPORT_CAPABILITIES			0x02
+#define PRI_READ_FULL_STATUS			0x03
+/*
+ * PERSISTENT_RESERVE_ SCOPE field
+ *
+ * spc4r17 section 6.13.3.3 Table 163
+ */
+#define PR_SCOPE_LU_SCOPE			0x00
+/*
+ * PERSISTENT_RESERVE_* TYPE field
+ *
+ * spc4r17 section 6.13.3.4 Table 164
+ */
+#define PR_TYPE_WRITE_EXCLUSIVE			0x01
+#define PR_TYPE_EXCLUSIVE_ACCESS		0x03
+#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY		0x05
+#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY	0x06
+#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG		0x07
+#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG		0x08
+
+#define PR_APTPL_MAX_IPORT_LEN			256
+#define PR_APTPL_MAX_TPORT_LEN			256
+
+extern struct kmem_cache *t10_pr_reg_cache;
+
+extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
+			char *, u32);
+extern int core_scsi2_emulate_crh(struct se_cmd *);
+extern int core_scsi3_alloc_aptpl_registration(
+			struct t10_reservation_template *, u64,
+			unsigned char *, unsigned char *, u32,
+			unsigned char *, u16, u32, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+			struct se_portal_group *, struct se_lun *,
+			struct se_lun_acl *);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+					     struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
+						  struct se_cmd *);
+extern int core_scsi3_emulate_pr(struct se_cmd *);
+extern int core_setup_reservations(struct se_device *, int);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 0000000..742d246
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1470 @@
+/*******************************************************************************
+ * Filename:  target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/genhd.h>
+#include <linux/cdrom.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_pscsi.h"
+
+#define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
+
+static struct se_subsystem_api pscsi_template;
+
+static void pscsi_req_done(struct request *, int);
+
+/*	pscsi_get_sh():
+ *
+ *
+ */
+static struct Scsi_Host *pscsi_get_sh(u32 host_no)
+{
+	struct Scsi_Host *sh = NULL;
+
+	sh = scsi_host_lookup(host_no);
+	if (IS_ERR(sh)) {
+		printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
+				" %u\n", host_no);
+		return NULL;
+	}
+
+	return sh;
+}
+
+/*	pscsi_attach_hba():
+ *
+ *	pscsi_get_sh() uses scsi_host_lookup() to locate the struct
+ *	Scsi_Host from the passed SCSI Host ID.
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	int hba_depth;
+	struct pscsi_hba_virt *phv;
+
+	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+	if (!(phv)) {
+		printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
+		return -1;
+	}
+	phv->phv_host_id = host_id;
+	phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+	hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+	atomic_set(&hba->left_queue_depth, hba_depth);
+	atomic_set(&hba->max_queue_depth, hba_depth);
+
+	hba->hba_ptr = (void *)phv;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
+		" Target Core with TCQ Depth: %d\n", hba->hba_id,
+		atomic_read(&hba->max_queue_depth));
+
+	return 0;
+}
+
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+	if (scsi_host) {
+		scsi_host_put(scsi_host);
+
+		printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+			" Generic Target Core\n", hba->hba_id,
+			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
+			"Unknown");
+	} else
+		printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+			" from Generic Target Core\n", hba->hba_id);
+
+	kfree(phv);
+	hba->hba_ptr = NULL;
+}
+
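+/*
+ * Toggle pSCSI passthrough HBA mode: a zero mode_flag releases the held
+ * struct Scsi_Host reference and returns to PHV_VIRUTAL_HOST_ID, while a
+ * non-zero mode_flag looks up the host from phv_host_id and switches the
+ * HBA into PHV_LLD_SCSI_HOST_NO mode.
+ */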
+static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+{
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+	/*
+	 * Release the struct Scsi_Host
+	 */
+	if (!(mode_flag)) {
+		if (!(sh))
+			return 0;
+
+		phv->phv_lld_host = NULL;
+		phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+		atomic_set(&hba->left_queue_depth, hba_depth);
+		atomic_set(&hba->max_queue_depth, hba_depth);
+
+		printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+			" %s\n", hba->hba_id, (sh->hostt->name) ?
+			(sh->hostt->name) : "Unknown");
+
+		scsi_host_put(sh);
+		return 0;
+	}
+	/*
+	 * Otherwise, locate struct Scsi_Host from the original passed
+	 * pSCSI Host ID and enable for phba mode
+	 */
+	sh = pscsi_get_sh(phv->phv_host_id);
+	if (!(sh)) {
+		printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+			" phv_host_id: %d\n", phv->phv_host_id);
+		return -1;
+	}
+	/*
+	 * Usually the SCSI LLD will use the hostt->can_queue value to define
+	 * its HBA TCQ depth.  Some other drivers (like 2.6 megaraid) don't set
+	 * this at all and set sh->can_queue at runtime.
+	 */
+	hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
+		sh->hostt->can_queue : sh->can_queue;
+
+	atomic_set(&hba->left_queue_depth, hba_depth);
+	atomic_set(&hba->max_queue_depth, hba_depth);
+
+	phv->phv_lld_host = sh;
+	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
+
+	return 1;
+}
+
+static void pscsi_tape_read_blocksize(struct se_device *dev,
+		struct scsi_device *sdev)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(12, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = MODE_SENSE;
+	cdb[4] = 0x0c; /* 12 bytes */
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
+			HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
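+	/*
+	 * The MODE_SENSE data is a 4-byte mode parameter header followed
+	 * by an 8-byte block descriptor; bytes 5-7 of that descriptor
+	 * (buf[9..11]) carry the current block length.
+	 */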
+	/*
+	 * If MODE_SENSE still returns zero, set the default value to 1024.
+	 */
+	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+	if (!sdev->sector_size)
+		sdev->sector_size = 1024;
+out_free:
+	kfree(buf);
+}
+
+static void
+pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char *buf;
+
+	if (sdev->inquiry_len < INQUIRY_LEN)
+		return;
+
+	buf = sdev->inquiry;
+	if (!buf)
+		return;
+	/*
+	 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+	 */
+	memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
+	memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
+	memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+}
+
+static int
+pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return -1;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x80; /* Unit Serial Number */
+	cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
+
+	wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+
+	kfree(buf);
+	return 0;
+
+out_free:
+	kfree(buf);
+	return -1;
+}
+
+static void
+pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
+		struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
+	int ident_len, page_len, off = 4, ret;
+	struct t10_vpd *vpd;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x83; /* Device Identifier */
+	cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
+			      NULL, HZ, 1, NULL);
+	if (ret)
+		goto out;
+
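+	/*
+	 * Walk the VPD 0x83 Identification descriptor list: PAGE LENGTH in
+	 * buf[2..3] covers the descriptors starting at offset 4, each one
+	 * (ident_len + 4) bytes long.
+	 */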
+	page_len = (buf[2] << 8) | buf[3];
+	while (page_len > 0) {
+		/* Grab a pointer to the Identification descriptor */
+		page_83 = &buf[off];
+		ident_len = page_83[3];
+		if (!ident_len) {
+			printk(KERN_ERR "page_83[3]: identifier"
+					" length zero!\n");
+			break;
+		}
+		printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
+
+		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
+		if (!vpd) {
+			printk(KERN_ERR "Unable to allocate memory for"
+					" struct t10_vpd\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&vpd->vpd_list);
+
+		transport_set_vpd_proto_id(vpd, page_83);
+		transport_set_vpd_assoc(vpd, page_83);
+
+		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+		if (transport_set_vpd_ident(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+
+		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
+		off += (ident_len + 4);
+		page_len -= (ident_len + 4);
+	}
+
+out:
+	kfree(buf);
+}
+
+/*	pscsi_add_device_to_list():
+ *
+ *
+ */
+static struct se_device *pscsi_add_device_to_list(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	struct pscsi_dev_virt *pdv,
+	struct scsi_device *sd,
+	int dev_flags)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct request_queue *q;
+	struct queue_limits *limits;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	if (!sd->queue_depth) {
+		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
+
+		printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+			" queue_depth to %d\n", sd->channel, sd->id,
+				sd->lun, sd->queue_depth);
+	}
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
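+	/*
+	 * max_hw_sectors and max_sectors are capped at the smaller of the
+	 * SCSI host's max_sectors and the request queue's current limits.
+	 */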
+	q = sd->request_queue;
+	limits = &dev_limits.limits;
+	limits->logical_block_size = sd->sector_size;
+	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
+				  queue_max_hw_sectors(q) : sd->host->max_sectors;
+	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
+				  queue_max_sectors(q) : sd->host->max_sectors;
+	dev_limits.hw_queue_depth = sd->queue_depth;
+	dev_limits.queue_depth = sd->queue_depth;
+	/*
+	 * Setup our standard INQUIRY info into se_dev->t10_wwn
+	 */
+	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+	/*
+	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
+	 * which has already been referenced by Linux SCSI code with
+	 * scsi_device_get() in this file's pscsi_create_virtdevice().
+	 *
+	 * The passthrough operations called by the transport_add_device_*
+	 * function below will require this pointer to be set for
+	 * passthrough ops.
+	 *
+	 * For the shutdown case in pscsi_free_device(), this struct
+	 * scsi_device reference is released with Linux SCSI code
+	 * scsi_device_put() and pdv->pdv_sd is cleared.
+	 */
+	pdv->pdv_sd = sd;
+
+	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
+				se_dev, dev_flags, (void *)pdv,
+				&dev_limits, NULL, NULL);
+	if (!(dev)) {
+		pdv->pdv_sd = NULL;
+		return NULL;
+	}
+
+	/*
+	 * Locate VPD WWN Information used for various purposes within
+	 * the Storage Engine.
+	 */
+	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+		/*
+		 * If VPD Unit Serial returned GOOD status, try
+		 * VPD Device Identification page (0x83).
+		 */
+		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+	}
+
+	/*
+	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+	 */
+	if (sd->type == TYPE_TAPE)
+		pscsi_tape_read_blocksize(dev, sd);
+	return dev;
+}
+
+static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct pscsi_dev_virt *pdv;
+
+	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+	if (!(pdv)) {
+		printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+		return NULL;
+	}
+	pdv->pdv_se_hba = hba;
+
+	printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+	return (void *)pdv;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_disk(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	struct block_device *bd;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+	/*
+	 * Claim exclusive struct block_device access to struct scsi_device
+	 * for TYPE_DISK using supplied udev_path
+	 */
+	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+	if (!(bd)) {
+		printk("pSCSI: blkdev_get_by_path() failed\n");
+		scsi_device_put(sd);
+		return NULL;
+	}
+	pdv->pdv_bd = bd;
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev)) {
+		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+		scsi_device_put(sd);
+		return NULL;
+	}
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_rom(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev)) {
+		scsi_device_put(sd);
+		return NULL;
+	}
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_other(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	spin_unlock_irq(sh->host_lock);
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev))
+		return NULL;
+
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+static struct se_device *pscsi_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+	struct se_device *dev;
+	struct scsi_device *sd;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int legacy_mode_enable = 0;
+
+	if (!(pdv)) {
+		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+				" parameter\n");
+		return NULL;
+	}
+	/*
+	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
+	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
+	 */
+	if (!(sh)) {
+		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+			printk(KERN_ERR "pSCSI: Unable to locate struct"
+				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
+			return NULL;
+		}
+		/*
+		 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
+		 * reference, we enforce that udev_path has been set
+		 */
+		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+			printk(KERN_ERR "pSCSI: udev_path attribute has not"
+				" been set before ENABLE=1\n");
+			return NULL;
+		}
+		/*
+		 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
+		 * use the original TCM hba ID to reference Linux/SCSI Host No
+		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
+		 */
+		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
+			spin_lock(&hba->device_lock);
+			if (!(list_empty(&hba->hba_dev_list))) {
+				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+					" with active devices\n");
+				spin_unlock(&hba->device_lock);
+				return NULL;
+			}
+			spin_unlock(&hba->device_lock);
+
+			if (pscsi_pmode_enable_hba(hba, 1) != 1)
+				return NULL;
+
+			legacy_mode_enable = 1;
+			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+			sh = phv->phv_lld_host;
+		} else {
+			sh = pscsi_get_sh(pdv->pdv_host_id);
+			if (!(sh)) {
+				printk(KERN_ERR "pSCSI: Unable to locate"
+					" pdv_host_id: %d\n", pdv->pdv_host_id);
+				return NULL;
+			}
+		}
+	} else {
+		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
+			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+				" struct Scsi_Host exists\n");
+			return NULL;
+		}
+	}
+
+	spin_lock_irq(sh->host_lock);
+	list_for_each_entry(sd, &sh->__devices, siblings) {
+		if ((pdv->pdv_channel_id != sd->channel) ||
+		    (pdv->pdv_target_id != sd->id) ||
+		    (pdv->pdv_lun_id != sd->lun))
+			continue;
+		/*
+		 * Functions will release the held struct scsi_host->host_lock
+	 * before calling pscsi_add_device_to_list() to register
+		 * struct scsi_device with target_core_mod.
+		 */
+		switch (sd->type) {
+		case TYPE_DISK:
+			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+			break;
+		case TYPE_ROM:
+			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+			break;
+		default:
+			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+			break;
+		}
+
+		if (!(dev)) {
+			if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+				scsi_host_put(sh);
+			else if (legacy_mode_enable) {
+				pscsi_pmode_enable_hba(hba, 0);
+				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+			}
+			pdv->pdv_sd = NULL;
+			return NULL;
+		}
+		return dev;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
+
+	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+		scsi_host_put(sh);
+	else if (legacy_mode_enable) {
+		pscsi_pmode_enable_hba(hba, 0);
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+	}
+
+	return NULL;
+}
+
+/*	pscsi_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void pscsi_free_device(void *p)
+{
+	struct pscsi_dev_virt *pdv = p;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	if (sd) {
+		/*
+		 * Release exclusive pSCSI internal struct block_device claim for
+		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+		 */
+		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+			blkdev_put(pdv->pdv_bd,
+				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+			pdv->pdv_bd = NULL;
+		}
+		/*
+		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
+		 * to struct Scsi_Host now.
+		 */
+		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+		    (phv->phv_lld_host != NULL))
+			scsi_host_put(phv->phv_lld_host);
+
+		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+			scsi_device_put(sd);
+
+		pdv->pdv_sd = NULL;
+	}
+
+	kfree(pdv);
+}
+
+static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+{
+	return container_of(task, struct pscsi_plugin_task, pscsi_task);
+}
+
+
+/*	pscsi_transport_complete():
+ *
+ *
+ */
+static int pscsi_transport_complete(struct se_task *task)
+{
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	int result;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	unsigned char *cdb = &pt->pscsi_cdb[0];
+
+	result = pt->pscsi_result;
+	/*
+	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
+	 * forced.
+	 */
+	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
+	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		if (!TASK_CMD(task)->se_deve)
+			goto after_mode_sense;
+
+		if (TASK_CMD(task)->se_deve->lun_flags &
+				TRANSPORT_LUNFLAGS_READ_ONLY) {
+			unsigned char *buf = (unsigned char *)
+				T_TASK(task->task_se_cmd)->t_task_buf;
+
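+			/*
+			 * Per SPC, bit 7 of the DEVICE-SPECIFIC PARAMETER in
+			 * the mode parameter header is the WP (Write Protect)
+			 * bit: byte 3 of the 8-byte MODE SENSE(10) header,
+			 * byte 2 of the 4-byte MODE SENSE(6) header.
+			 */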
+			if (cdb[0] == MODE_SENSE_10) {
+				if (!(buf[3] & 0x80))
+					buf[3] |= 0x80;
+			} else {
+				if (!(buf[2] & 0x80))
+					buf[2] |= 0x80;
+			}
+		}
+	}
+after_mode_sense:
+
+	if (sd->type != TYPE_TAPE)
+		goto after_mode_select;
+
+	/*
+	 * Hack to correctly obtain the initiator requested blocksize for
+	 * TYPE_TAPE.  Since this value depends on the loaded tape medium,
+	 * struct scsi_device->sector_size will not contain the correct value
+	 * by default, so set it here so that
+	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
+	 * storage engine.
+	 */
+	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
+	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		unsigned char *buf;
+		struct scatterlist *sg = task->task_sg;
+		u16 bdl;
+		u32 blocksize;
+
+		buf = sg_virt(&sg[0]);
+		if (!(buf)) {
+			printk(KERN_ERR "Unable to get buf for scatterlist\n");
+			goto after_mode_select;
+		}
+
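+		/*
+		 * Mode parameter list layout per SPC: MODE SELECT(6) has a
+		 * 4-byte header with the block descriptor length in byte 3;
+		 * MODE SELECT(10) has an 8-byte header with it in bytes 6-7.
+		 * The first block descriptor carries the block length in its
+		 * last three bytes, i.e. bytes 9-11 and 13-15 respectively.
+		 */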
+		if (cdb[0] == MODE_SELECT)
+			bdl = (buf[3]);
+		else
+			bdl = (buf[6] << 8) | (buf[7]);
+
+		if (!bdl)
+			goto after_mode_select;
+
+		if (cdb[0] == MODE_SELECT)
+			blocksize = (buf[9] << 16) | (buf[10] << 8) |
+					(buf[11]);
+		else
+			blocksize = (buf[13] << 16) | (buf[14] << 8) |
+					(buf[15]);
+
+		sd->sector_size = blocksize;
+	}
+after_mode_select:
+
+	if (status_byte(result) & CHECK_CONDITION)
+		return 1;
+
+	return 0;
+}
+
+static struct se_task *
+pscsi_alloc_task(struct se_cmd *cmd)
+{
+	struct pscsi_plugin_task *pt;
+	unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
+
+	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+	if (!pt) {
+		printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+		return NULL;
+	}
+
+	/*
+	 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
+	 * allocate the extended CDB buffer for per struct se_task context
+	 * pt->pscsi_cdb now.
+	 */
+	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
+
+		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
+		if (!(pt->pscsi_cdb)) {
+			printk(KERN_ERR "pSCSI: Unable to allocate extended"
+					" pt->pscsi_cdb\n");
+			kfree(pt);
+			return NULL;
+		}
+	} else
+		pt->pscsi_cdb = &pt->__pscsi_cdb[0];
+
+	return &pt->pscsi_task;
+}
+
+static inline void pscsi_blk_init_request(
+	struct se_task *task,
+	struct pscsi_plugin_task *pt,
+	struct request *req,
+	int bidi_read)
+{
+	/*
+	 * Defined as "scsi command" in include/linux/blkdev.h.
+	 */
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	/*
+	 * For the extra BIDI-COMMAND READ struct request we do not
+	 * need to setup the remaining structure members
+	 */
+	if (bidi_read)
+		return;
+	/*
+	 * Setup the done function pointer for struct request,
+	 * also set the end_io_data pointer to struct se_task.
+	 */
+	req->end_io = pscsi_req_done;
+	req->end_io_data = (void *)task;
+	/*
+	 * Load the referenced struct se_task's SCSI CDB into
+	 * include/linux/blkdev.h:struct request->cmd
+	 */
+	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+	req->cmd = &pt->pscsi_cdb[0];
+	/*
+	 * Setup pointer for outgoing sense data.
+	 */
+	req->sense = (void *)&pt->pscsi_sense[0];
+	req->sense_len = 0;
+}
+
+/*
+ * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
+ */
+static int pscsi_blk_get_request(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+
+	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
+			(task->task_data_direction == DMA_TO_DEVICE),
+			GFP_KERNEL);
+	if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
+		printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+				PTR_ERR(pt->pscsi_req));
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+	 * and setup rq callback, CDB and sense.
+	 */
+	pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+	return 0;
+}
+
+/*      pscsi_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int pscsi_do_task(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	/*
+	 * Set the struct request->timeout value based on peripheral
+	 * device type from SCSI.
+	 */
+	if (pdv->pdv_sd->type == TYPE_DISK)
+		pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
+	else
+		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
+
+	pt->pscsi_req->retries = PS_RETRY;
+	/*
+	 * Queue the struct request into the struct scsi_device->request_queue.
+	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
+	 * descriptor
+	 */
+	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
+			(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+			pscsi_req_done);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static void pscsi_free_task(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct se_cmd *cmd = task->task_se_cmd;
+
+	/*
+	 * Release the extended CDB allocation from pscsi_alloc_task()
+	 * if one exists.
+	 */
+	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
+		kfree(pt->pscsi_cdb);
+	/*
+	 * We do not release the bio(s) here associated with this task, as
+	 * this is handled by bio_put() and pscsi_bi_endio().
+	 */
+	kfree(pt);
+}
+
+enum {
+	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
+	Opt_scsi_lun_id, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_scsi_host_id, "scsi_host_id=%d"},
+	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
+	{Opt_scsi_target_id, "scsi_target_id=%d"},
+	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
+	{Opt_err, NULL}
+};
+
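+/*
+ * Example usage (the path is illustrative; the actual configfs layout
+ * depends on how target_core_mod is configured):
+ *
+ *   echo "scsi_host_id=2,scsi_channel_id=0,scsi_target_id=0,scsi_lun_id=0" \
+ *       > /sys/kernel/config/target/core/pscsi_0/pdev/control
+ */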
+static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_scsi_host_id:
+			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+				printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+					" scsi_host_id while phv_mode =="
+					" PHV_LLD_SCSI_HOST_NO\n",
+					phv->phv_host_id);
+				ret = -EINVAL;
+				goto out;
+			}
+			match_int(args, &arg);
+			pdv->pdv_host_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
+			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
+			break;
+		case Opt_scsi_channel_id:
+			match_int(args, &arg);
+			pdv->pdv_channel_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+				" ID: %d\n",  phv->phv_host_id,
+				pdv->pdv_channel_id);
+			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
+			break;
+		case Opt_scsi_target_id:
+			match_int(args, &arg);
+			pdv->pdv_target_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+				" ID: %d\n", phv->phv_host_id,
+				pdv->pdv_target_id);
+			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
+			break;
+		case Opt_scsi_lun_id:
+			match_int(args, &arg);
+			pdv->pdv_lun_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
+			pdv->pdv_flags |= PDF_HAS_LUN_ID;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t pscsi_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+
+	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+		printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+			" scsi_lun_id= parameters\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
+					      struct se_subsystem_dev *se_dev,
+					      char *b)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	unsigned char host_id[16];
+	ssize_t bl;
+	int i;
+
+	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
+	else
+		snprintf(host_id, 16, "PHBA Mode");
+
+	bl = sprintf(b, "SCSI Device Bus Location:"
+		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
+		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
+		host_id);
+
+	if (sd) {
+		bl += sprintf(b + bl, "        ");
+		bl += sprintf(b + bl, "Vendor: ");
+		for (i = 0; i < 8; i++) {
+			if (ISPRINT(sd->vendor[i]))   /* printable character? */
+				bl += sprintf(b + bl, "%c", sd->vendor[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Model: ");
+		for (i = 0; i < 16; i++) {
+			if (ISPRINT(sd->model[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->model[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Rev: ");
+		for (i = 0; i < 4; i++) {
+			if (ISPRINT(sd->rev[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->rev[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	return bl;
+}
+
+static void pscsi_bi_endio(struct bio *bio, int error)
+{
+	bio_put(bio);
+}
+
+static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+{
+	struct bio *bio;
+	/*
+	 * Use bio_kmalloc() following the comment for bio -> struct request
+	 * in block/blk-core.c:blk_make_request()
+	 */
+	bio = bio_kmalloc(GFP_KERNEL, sg_num);
+	if (!(bio)) {
+		printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+		return NULL;
+	}
+	bio->bi_end_io = pscsi_bi_endio;
+
+	return bio;
+}
+
+#if 0
+#define DEBUG_PSCSI(x...) printk(x)
+#else
+#define DEBUG_PSCSI(x...)
+#endif
+
+static int __pscsi_map_task_SG(
+	struct se_task *task,
+	struct scatterlist *task_sg,
+	u32 task_sg_num,
+	int bidi_read)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct page *page;
+	struct scatterlist *sg;
+	u32 data_len = task->task_size, i, len, bytes, off;
+	int nr_pages = (task->task_size + task_sg[0].offset +
+			PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	int rw = (task->task_data_direction == DMA_TO_DEVICE);
+
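+	/*
+	 * nr_pages above rounds the payload up to whole pages, accounting
+	 * for the offset into the first page: e.g. with 4K pages, a
+	 * 10240-byte payload starting at offset 512 spans bytes 512-10751
+	 * and thus (10240 + 512 + 4095) >> 12 = 3 pages.
+	 */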
+	if (!task->task_size)
+		return 0;
+	/*
+	 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
+	 * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
+	 * struct scatterlist memory.  The struct se_task->task_sg[] currently needs
+	 * to be attached to struct bios for submission to Linux/SCSI using
+	 * struct request to struct scsi_device->request_queue.
+	 *
+	 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
+	 * is ported to upstream SCSI passthrough functionality that accepts
+	 * struct scatterlist->page_link or struct page as a parameter.
+	 */
+	DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+
+	for_each_sg(task_sg, sg, task_sg_num, i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+			page, len, off);
+
+		while (len > 0 && data_len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
+
+			if (!(bio)) {
+				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+				nr_pages -= nr_vecs;
+				/*
+				 * Calls bio_kmalloc() and sets bio->bi_end_io()
+				 */
+				bio = pscsi_get_bio(pdv, nr_vecs);
+				if (!(bio))
+					goto fail;
+
+				if (rw)
+					bio->bi_rw |= REQ_WRITE;
+
+				DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+					" dir: %s nr_vecs: %d\n", bio,
+					(rw) ? "rw" : "r", nr_vecs);
+				/*
+				 * Set *hbio pointer to handle the case:
+				 * nr_pages > BIO_MAX_PAGES, where additional
+				 * bios need to be added to complete a given
+				 * struct se_task
+				 */
+				if (!hbio)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+				" bio: %p page: %p len: %d off: %d\n", i, bio,
+				page, len, off);
+
+			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
+					bio, page, bytes, off);
+			if (rc != bytes)
+				goto fail;
+
+			DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+				bio->bi_vcnt, nr_vecs);
+
+			if (bio->bi_vcnt > nr_vecs) {
+				DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+					" %d i: %d bio: %p, allocating another"
+					" bio\n", bio->bi_vcnt, i, bio);
+				/*
+				 * Clear the pointer so that another bio will
+				 * be allocated with pscsi_get_bio() above, the
+				 * current bio has already been set *tbio and
+				 * bio->bi_next.
+				 */
+				bio = NULL;
+			}
+
+			page++;
+			len -= bytes;
+			data_len -= bytes;
+			off = 0;
+		}
+	}
+	/*
+	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
+	 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
+	 */
+	if (!(bidi_read)) {
+		/*
+		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
+		 * allocate the pSCSI task a struct request.
+		 */
+		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
+					hbio, GFP_KERNEL);
+		if (IS_ERR(pt->pscsi_req)) {
+			printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+			goto fail;
+		}
+		/*
+		 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+		 * and setup rq callback, CDB and sense.
+		 */
+		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+
+		return task->task_sg_num;
+	}
+	/*
+	 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
+	 * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
+	 */
+	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
+					hbio, GFP_KERNEL);
+	if (IS_ERR(pt->pscsi_req->next_rq)) {
+		printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+		goto fail;
+	}
+	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
+
+	return task->task_sg_num;
+fail:
+	while (hbio) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_endio(bio, 0);
+	}
+	return ret;
+}
+
+static int pscsi_map_task_SG(struct se_task *task)
+{
+	int ret;
+
+	/*
+	 * Setup the main struct request for the task->task_sg[] payload
+	 */
+
+	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+	if (ret >= 0 && task->task_sg_bidi) {
+		/*
+		 * If present, set up the extra BIDI-COMMAND SCSI READ
+		 * struct request and payload.
+		 */
+		ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
+					task->task_sg_num, 1);
+	}
+
+	if (ret < 0)
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	return 0;
+}
+
+/*	pscsi_map_task_non_SG():
+ *
+ *
+ */
+static int pscsi_map_task_non_SG(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	int ret = 0;
+
+	if (pscsi_blk_get_request(task) < 0)
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	if (!task->task_size)
+		return 0;
+
+	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
+			pt->pscsi_req, T_TASK(cmd)->t_task_buf,
+			task->task_size, GFP_KERNEL);
+	if (ret < 0) {
+		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	return 0;
+}
+
+static int pscsi_CDB_none(struct se_task *task)
+{
+	return pscsi_blk_get_request(task);
+}
+
+/*	pscsi_get_cdb():
+ *
+ *
+ */
+static unsigned char *pscsi_get_cdb(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	return pt->pscsi_cdb;
+}
+
+/*	pscsi_get_sense_buffer():
+ *
+ *
+ */
+static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	return (unsigned char *)&pt->pscsi_sense[0];
+}
+
+/*	pscsi_get_device_rev():
+ *
+ *
+ */
+static u32 pscsi_get_device_rev(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
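+	/*
+	 * Linux generally stores the INQUIRY ANSI-approved version plus one
+	 * in scsi_device->scsi_level (see scsi_scan.c), so subtracting one
+	 * recovers the SPC level; clamp to 1 for devices reporting level 0.
+	 */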
+	return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+}
+
+/*	pscsi_get_device_type():
+ *
+ *
+ */
+static u32 pscsi_get_device_type(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	return sd->type;
+}
+
+static sector_t pscsi_get_blocks(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+
+	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+		return pdv->pdv_bd->bd_part->nr_sects;
+
+	dump_stack();
+	return 0;
+}
+
+/*	pscsi_handle_SAM_STATUS_failures():
+ *
+ *
+ */
+static inline void pscsi_process_SAM_status(
+	struct se_task *task,
+	struct pscsi_plugin_task *pt)
+{
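+	/*
+	 * Linux's status_byte() yields the SAM status shifted right by one
+	 * bit; the << 1 below restores the SAM value, e.g. CHECK CONDITION
+	 * is status_byte() 0x01, SAM_STAT_CHECK_CONDITION 0x02.
+	 */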
+	task->task_scsi_status = status_byte(pt->pscsi_result);
+	if ((task->task_scsi_status)) {
+		task->task_scsi_status <<= 1;
+		printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+	}
+
+	switch (host_byte(pt->pscsi_result)) {
+	case DID_OK:
+		transport_complete_task(task, (!task->task_scsi_status));
+		break;
+	default:
+		printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		TASK_CMD(task)->transport_error_status =
+					PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		transport_complete_task(task, 0);
+		break;
+	}
+
+	return;
+}
+
+static void pscsi_req_done(struct request *req, int uptodate)
+{
+	struct se_task *task = req->end_io_data;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	pt->pscsi_result = req->errors;
+	pt->pscsi_resid = req->resid_len;
+
+	pscsi_process_SAM_status(task, pt);
+	/*
+	 * Release BIDI-READ if present
+	 */
+	if (req->next_rq != NULL)
+		__blk_put_request(req->q, req->next_rq);
+
+	__blk_put_request(req->q, req);
+	pt->pscsi_req = NULL;
+}
+
+static struct se_subsystem_api pscsi_template = {
+	.name			= "pscsi",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
+	.cdb_none		= pscsi_CDB_none,
+	.map_task_non_SG	= pscsi_map_task_non_SG,
+	.map_task_SG		= pscsi_map_task_SG,
+	.attach_hba		= pscsi_attach_hba,
+	.detach_hba		= pscsi_detach_hba,
+	.pmode_enable_hba	= pscsi_pmode_enable_hba,
+	.allocate_virtdevice	= pscsi_allocate_virtdevice,
+	.create_virtdevice	= pscsi_create_virtdevice,
+	.free_device		= pscsi_free_device,
+	.transport_complete	= pscsi_transport_complete,
+	.alloc_task		= pscsi_alloc_task,
+	.do_task		= pscsi_do_task,
+	.free_task		= pscsi_free_task,
+	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
+	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
+	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
+	.get_cdb		= pscsi_get_cdb,
+	.get_sense_buffer	= pscsi_get_sense_buffer,
+	.get_device_rev		= pscsi_get_device_rev,
+	.get_device_type	= pscsi_get_device_type,
+	.get_blocks		= pscsi_get_blocks,
+};
+
+static int __init pscsi_module_init(void)
+{
+	return transport_subsystem_register(&pscsi_template);
+}
+
+static void pscsi_module_exit(void)
+{
+	transport_subsystem_release(&pscsi_template);
+}
+
+MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(pscsi_module_init);
+module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 0000000..a4cd5d3
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_PSCSI_H
+#define TARGET_CORE_PSCSI_H
+
+#define PSCSI_VERSION		"v4.0"
+#define PSCSI_VIRTUAL_HBA_DEPTH	2048
+
+/* used in pscsi_find_alloc_len() */
+#ifndef INQUIRY_DATA_SIZE
+#define INQUIRY_DATA_SIZE	0x24
+#endif
+
+/* used in pscsi_add_device_to_list() */
+#define PSCSI_DEFAULT_QUEUEDEPTH	1
+
+#define PS_RETRY		5
+#define PS_TIMEOUT_DISK		(15*HZ)
+#define PS_TIMEOUT_OTHER	(500*HZ)
+
+#include <linux/device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_device.h>
+#include <linux/kref.h>
+#include <linux/kobject.h>
+
+struct pscsi_plugin_task {
+	struct se_task pscsi_task;
+	unsigned char *pscsi_cdb;
+	unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
+	int	pscsi_direction;
+	int	pscsi_result;
+	u32	pscsi_resid;
+	struct request *pscsi_req;
+} ____cacheline_aligned;
+
+#define PDF_HAS_CHANNEL_ID	0x01
+#define PDF_HAS_TARGET_ID	0x02
+#define PDF_HAS_LUN_ID		0x04
+#define PDF_HAS_VPD_UNIT_SERIAL 0x08
+#define PDF_HAS_VPD_DEV_IDENT	0x10
+#define PDF_HAS_VIRT_HOST_ID	0x20
+
+struct pscsi_dev_virt {
+	int	pdv_flags;
+	int	pdv_host_id;
+	int	pdv_channel_id;
+	int	pdv_target_id;
+	int	pdv_lun_id;
+	struct block_device *pdv_bd;
+	struct scsi_device *pdv_sd;
+	struct se_hba *pdv_se_hba;
+} ____cacheline_aligned;
+
+typedef enum phv_modes {
+	PHV_VIRUTAL_HOST_ID,
+	PHV_LLD_SCSI_HOST_NO
+} phv_modes_t;
+
+struct pscsi_hba_virt {
+	int			phv_host_id;
+	phv_modes_t		phv_mode;
+	struct Scsi_Host	*phv_lld_host;
+} ____cacheline_aligned;
+
+#endif   /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 0000000..979aebf
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1091 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_rd.h"
+
+static struct se_subsystem_api rd_dr_template;
+static struct se_subsystem_api rd_mcp_template;
+
+/* #define DEBUG_RAMDISK_MCP */
+/* #define DEBUG_RAMDISK_DR */
+
+/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct rd_host *rd_host;
+
+	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
+	if (!(rd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+		return -ENOMEM;
+	}
+
+	rd_host->rd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) rd_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
+		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
+		RD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void rd_detach_hba(struct se_hba *hba)
+{
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+	kfree(rd_host);
+	hba->hba_ptr = NULL;
+}
+
+/*	rd_release_device_space():
+ *
+ *
+ */
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 i, j, page_count = 0, sg_per_table;
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	sg_table = rd_dev->sg_table_array;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg = sg_table[i].sg_table;
+		sg_per_table = sg_table[i].rd_sg_count;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = sg_page(&sg[j]);
+			if ((pg)) {
+				__free_page(pg);
+				page_count++;
+			}
+		}
+
+		kfree(sg);
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	kfree(sg_table);
+	rd_dev->sg_table_array = NULL;
+	rd_dev->sg_table_count = 0;
+}
+
+
+/*	rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
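+	/*
+	 * Sizing note: with RD_MAX_ALLOCATION_SIZE of 64K and a scatterlist
+	 * entry of roughly 32 bytes (the exact size is config-dependent),
+	 * max_sg_per_table comes out around 2048 entries, i.e. each table
+	 * maps up to 8 MiB of ramdisk backing store with 4K pages.
+	 */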
+	if (rd_dev->rd_page_count <= 0) {
+		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -1;
+	}
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!(sg_table)) {
+		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -1;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	while (total_sg_needed) {
+		sg_per_table = (total_sg_needed > max_sg_per_table) ?
+			max_sg_per_table : total_sg_needed;
+
+		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if (!(sg)) {
+			printk(KERN_ERR "Unable to allocate scatterlist array"
+				" for struct rd_dev\n");
+			return -1;
+		}
+
+		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+
+		sg_table[i].sg_table = sg;
+		sg_table[i].rd_sg_count = sg_per_table;
+		sg_table[i].page_start_offset = page_offset;
+		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+						- 1;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = alloc_pages(GFP_KERNEL, 0);
+			if (!(pg)) {
+				printk(KERN_ERR "Unable to allocate scatterlist"
+					" pages for struct rd_dev_sg_table\n");
+				return -1;
+			}
+			sg_assign_page(&sg[j], pg);
+			sg[j].length = PAGE_SIZE;
+		}
+
+		page_offset += sg_per_table;
+		total_sg_needed -= sg_per_table;
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count);
+
+	return 0;
+}
+
+static void *rd_allocate_virtdevice(
+	struct se_hba *hba,
+	const char *name,
+	int rd_direct)
+{
+	struct rd_dev *rd_dev;
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
+	if (!(rd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+		return NULL;
+	}
+
+	rd_dev->rd_host = rd_host;
+	rd_dev->rd_direct = rd_direct;
+
+	return rd_dev;
+}
+
+static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 1);
+}
+
+static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/*	rd_create_virtdevice():
+ *
+ *
+ */
+static struct se_device *rd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p,
+	int rd_direct)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct rd_dev *rd_dev = p;
+	struct rd_host *rd_host = hba->hba_ptr;
+	int dev_flags = 0;
+	char prod[16], rev[4];
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	if (rd_build_device_space(rd_dev) < 0)
+		goto fail;
+
+	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
+						RD_MCP_VERSION);
+
+	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
+	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
+	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
+	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba,
+			(rd_dev->rd_direct) ? &rd_dr_template :
+			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+			&dev_limits, prod, rev);
+	if (!(dev))
+		goto fail;
+
+	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+	rd_dev->rd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+		" %u pages in %u tables, %lu total bytes\n",
+		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count,
+		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+	return dev;
+
+fail:
+	rd_release_device_space(rd_dev);
+	return NULL;
+}
+
+static struct se_device *rd_DIRECT_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 1);
+}
+
+static struct se_device *rd_MEMCPY_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/*	rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_device(void *p)
+{
+	struct rd_dev *rd_dev = p;
+
+	rd_release_device_space(rd_dev);
+	kfree(rd_dev);
+}
+
+static inline struct rd_request *RD_REQ(struct se_task *task)
+{
+	return container_of(task, struct rd_request, rd_task);
+}
+
+static struct se_task *
+rd_alloc_task(struct se_cmd *cmd)
+{
+	struct rd_request *rd_req;
+
+	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
+	if (!rd_req) {
+		printk(KERN_ERR "Unable to allocate struct rd_request\n");
+		return NULL;
+	}
+	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
+
+	return &rd_req->rd_task;
+}
+
+/*	rd_get_sg_table():
+ *
+ *
+ */
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+{
+	u32 i;
+	struct rd_dev_sg_table *sg_table;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg_table = &rd_dev->sg_table_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+/*	rd_MEMCPY_read():
+ *
+ *
+ */
+static int rd_MEMCPY_read(struct rd_request *req)
+{
+	struct se_task *task = &req->rd_task;
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = task->task_sg;
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	src_offset = rd_offset;
+
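+	/*
+	 * Walk the destination (task) and source (ramdisk) scatterlists in
+	 * lockstep, copying the smaller of the two remaining segment
+	 * lengths each iteration and advancing whichever segment ran out.
+	 */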
+	while (req->rd_size) {
+		if ((sg_d[i].length - dst_offset) <
+		    (sg_s[j].length - src_offset)) {
+			length = (sg_d[i].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
+				sg_s[j].length);
+			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src = sg_virt(&sg_s[j]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst_offset = 0;
+			src_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_s[j].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset,
+				j, sg_s[j].length);
+			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			if (sg_d[i].length == length) {
+				i++;
+				dst_offset = 0;
+			} else
+				dst_offset = length;
+
+			src = sg_virt(&sg_s[j++]) + src_offset;
+			if (!src)
+				BUG();
+
+			src_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_write():
+ *
+ *
+ */
+static int rd_MEMCPY_write(struct rd_request *req)
+{
+	struct se_task *task = &req->rd_task;
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
+	sg_s = task->task_sg;
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	dst_offset = rd_offset;
+
+	while (req->rd_size) {
+		if ((sg_s[i].length - src_offset) <
+		    (sg_d[j].length - dst_offset)) {
+			length = (sg_s[i].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i++]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst = sg_virt(&sg_d[j]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src_offset = 0;
+			dst_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_d[j].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i]) + src_offset;
+			if (!src)
+				BUG();
+
+			if (sg_s[i].length == length) {
+				i++;
+				src_offset = 0;
+			} else
+				src_offset = length;
+
+			dst = sg_virt(&sg_d[j++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			dst_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_d = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_MEMCPY_do_task(struct se_task *task)
+{
+	struct se_device *dev = task->se_dev;
+	struct rd_request *req = RD_REQ(task);
+	unsigned long long lba;
+	int ret;
+
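+	/*
+	 * Map the logical block address onto a ramdisk page and byte offset.
+	 * do_div() divides lba in place and returns the remainder, so with
+	 * 512-byte blocks and 4K pages (8 blocks per page), LBA 13 maps to
+	 * rd_page 1 and rd_offset (13 % 8) * 512 = 2560.
+	 */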
+	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+	lba = task->task_lba;
+	req->rd_offset = (do_div(lba,
+			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
+			   DEV_ATTRIB(dev)->block_size;
+	req->rd_size = task->task_size;
+
+	if (task->task_data_direction == DMA_FROM_DEVICE)
+		ret = rd_MEMCPY_read(req);
+	else
+		ret = rd_MEMCPY_write(req);
+
+	if (ret != 0)
+		return ret;
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_DIRECT_with_offset():
+ *
+ *
+ */
+static int rd_DIRECT_with_offset(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct rd_request *req = RD_REQ(task);
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct se_mem *se_mem;
+	struct scatterlist *sg_s;
+	u32 j = 0, set_offset = 1;
+	u32 get_next_table = 0, offset_length, table_sg_end;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+		(task->task_data_direction == DMA_TO_DEVICE) ?
+			"Write" : "Read",
+		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
+#endif
+	while (req->rd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		if (set_offset) {
+			offset_length = sg_s[j].length - req->rd_offset;
+			if (offset_length > req->rd_size)
+				offset_length = req->rd_size;
+
+			se_mem->se_page = sg_page(&sg_s[j++]);
+			se_mem->se_off = req->rd_offset;
+			se_mem->se_len = offset_length;
+
+			set_offset = 0;
+			get_next_table = (j > table_sg_end);
+			goto check_eot;
+		}
+
+		offset_length = (req->rd_size < req->rd_offset) ?
+			req->rd_size : req->rd_offset;
+
+		se_mem->se_page = sg_page(&sg_s[j]);
+		se_mem->se_len = offset_length;
+
+		set_offset = 1;
+
+check_eot:
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
+			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
+			req->rd_page, req->rd_size, offset_length, j, se_mem,
+			se_mem->se_page, se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= offset_length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (!set_offset && !get_next_table)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk(KERN_INFO "page: %u in same page table\n",
+					req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_without_offset():
+ *
+ *
+ */
+static int rd_DIRECT_without_offset(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct rd_request *req = RD_REQ(task);
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct se_mem *se_mem;
+	struct scatterlist *sg_s;
+	u32 length, j = 0;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
+		(task->task_data_direction == DMA_TO_DEVICE) ?
+			"Write" : "Read",
+		task->task_lba, req->rd_size, req->rd_page);
+#endif
+	while (req->rd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		length = (req->rd_size < sg_s[j].length) ?
+			req->rd_size : sg_s[j].length;
+
+		se_mem->se_page = sg_page(&sg_s[j++]);
+		se_mem->se_len = length;
+
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
+			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
+			req->rd_size, j, se_mem, se_mem->se_page,
+			se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk("page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_do_se_mem_map():
+ *
+ *
+ */
+static int rd_DIRECT_do_se_mem_map(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset_in)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct rd_request *req = RD_REQ(task);
+	u32 task_offset = *task_offset_in;
+	unsigned long long lba;
+	int ret;
+
+	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
+			PAGE_SIZE);
+	lba = task->task_lba;
+	req->rd_offset = (do_div(lba,
+			  (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
+			   DEV_ATTRIB(task->se_dev)->block_size;
+	req->rd_size = task->task_size;
+
+	if (req->rd_offset)
+		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
+				task_offset_in);
+	else
+		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
+				task_offset_in);
+
+	if (ret < 0)
+		return ret;
+
+	if (CMD_TFO(cmd)->task_sg_chaining == 0)
+		return 0;
+	/*
+	 * Currently prevent writers from multiple HW fabrics doing
+	 * pci_map_sg() to RD_DR's internal scatterlist memory.
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
+				" RAMDISK_DR with task_sg_chaining=1\n");
+		return -1;
+	}
+	/*
+	 * Special case for if task_sg_chaining is enabled, then
+	 * we setup struct se_task->task_sg[], as it will be used by
+	 * transport_do_task_sg_chain() for creating chainged SGLs
+	 * across multiple struct se_task->task_sg[].
+	 */
+	if (!(transport_calc_sg_num(task,
+			list_entry(T_TASK(cmd)->t_mem_list->next,
+				   struct se_mem, se_list),
+			task_offset)))
+		return -1;
+
+	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+			list_entry(T_TASK(cmd)->t_mem_list->next,
+				   struct se_mem, se_list),
+			out_se_mem, se_mem_cnt, task_offset_in);
+}
+
+/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_DIRECT_do_task(struct se_task *task)
+{
+	/*
+	 * At this point the locally allocated RD tables have been mapped
+	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
+	 */
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_task(struct se_task *task)
+{
+	kfree(RD_REQ(task));
+}
+
+enum {
+	Opt_rd_pages, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_rd_pages, "rd_pages=%d"},
+	{Opt_err, NULL}
+};
+
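+/*
+ * Example usage (the path is illustrative): a 256 MiB ramdisk with 4K
+ * pages would be configured with
+ *
+ *   echo "rd_pages=65536" > /sys/kernel/config/target/core/rd_mcp_0/rd0/control
+ */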
+static ssize_t rd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_rd_pages:
+			match_int(args, &arg);
+			rd_dev->rd_page_count = arg;
+			printk(KERN_INFO "RAMDISK: Referencing Page"
+				" Count: %u\n", rd_dev->rd_page_count);
+			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		printk(KERN_INFO "Missing rd_pages= parameter\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t rd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
+			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+			"rd_direct" : "rd_mcp");
+	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
+			"  SG_table_count: %u\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count);
+	return bl;
+}
+
+/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *rd_get_cdb(struct se_task *task)
+{
+	struct rd_request *req = RD_REQ(task);
+
+	return req->rd_scsi_cdb;
+}
+
+static u32 rd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 rd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t rd_get_blocks(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = dev->dev_ptr;
+	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+			DEV_ATTRIB(dev)->block_size) - 1;
+
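+	/*
+	 * Returned as the highest addressable LBA (READ CAPACITY style),
+	 * hence the trailing -1: e.g. rd_pages=4096 with 4K pages and
+	 * 512-byte blocks gives 32768 blocks, so 32767 is returned.
+	 */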
+	return blocks_long;
+}
+
+static struct se_subsystem_api rd_dr_template = {
+	.name			= "rd_dr",
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
+	.create_virtdevice	= rd_DIRECT_create_virtdevice,
+	.free_device		= rd_free_device,
+	.alloc_task		= rd_alloc_task,
+	.do_task		= rd_DIRECT_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_cdb		= rd_get_cdb,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_blocks		= rd_get_blocks,
+	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
+};
+
+static struct se_subsystem_api rd_mcp_template = {
+	.name			= "rd_mcp",
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
+	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
+	.free_device		= rd_free_device,
+	.alloc_task		= rd_alloc_task,
+	.do_task		= rd_MEMCPY_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_cdb		= rd_get_cdb,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_blocks		= rd_get_blocks,
+};
+
+int __init rd_module_init(void)
+{
+	int ret;
+
+	ret = transport_subsystem_register(&rd_dr_template);
+	if (ret < 0)
+		return ret;
+
+	ret = transport_subsystem_register(&rd_mcp_template);
+	if (ret < 0) {
+		transport_subsystem_release(&rd_dr_template);
+		return ret;
+	}
+
+	return 0;
+}
+
+void rd_module_exit(void)
+{
+	transport_subsystem_release(&rd_dr_template);
+	transport_subsystem_release(&rd_mcp_template);
+}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 0000000..13badfb
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,73 @@
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION		"v4.0"
+#define RD_DR_VERSION		"4.0"
+#define RD_MCP_VERSION		"4.0"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE	65536
+/* Maximum queuedepth for the Ramdisk HBA */
+#define RD_HBA_QUEUE_DEPTH	256
+#define RD_DEVICE_QUEUE_DEPTH	32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE		512
+#define RD_MAX_SECTORS		1024
+
+extern struct kmem_cache *se_mem_cache;
+
+/* Used in target_core_init_configfs() for virtual LUN 0 access */
+int __init rd_module_init(void);
+void rd_module_exit(void);
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct rd_request {
+	struct se_task	rd_task;
+
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	/* Offset from start of page */
+	u32		rd_offset;
+	/* Starting page in Ramdisk for request */
+	u32		rd_page;
+	/* Total number of pages needed for request */
+	u32		rd_page_count;
+	/* Remaining request data size in bytes */
+	u32		rd_size;
+	/* Ramdisk device */
+	struct rd_dev	*rd_dev;
+} ____cacheline_aligned;
+
+struct rd_dev_sg_table {
+	u32		page_start_offset;
+	u32		page_end_offset;
+	u32		rd_sg_count;
+	struct scatterlist *sg_table;
+} ____cacheline_aligned;
+
+#define RDF_HAS_PAGE_COUNT	0x01
+
+struct rd_dev {
+	int		rd_direct;
+	u32		rd_flags;
+	/* Unique Ramdisk Device ID in Ramdisk HBA */
+	u32		rd_dev_id;
+	/* Total page count for ramdisk device */
+	u32		rd_page_count;
+	/* Number of SG tables in sg_table_array */
+	u32		sg_table_count;
+	u32		rd_queue_depth;
+	/* Array of rd_dev_sg_table_t containing scatterlists */
+	struct rd_dev_sg_table *sg_table_array;
+	/* Ramdisk HBA device is connected to */
+	struct rd_host *rd_host;
+} ____cacheline_aligned;
+
+struct rd_host {
+	u32		rd_host_dev_id_count;
+	u32		rd_host_id;		/* Unique Ramdisk Host ID */
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_RD_H */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
new file mode 100644
index 0000000..dc6fed0
--- /dev/null
+++ b/drivers/target/target_core_scdb.c
@@ -0,0 +1,105 @@
+/*******************************************************************************
+ * Filename:  target_core_scdb.c
+ *
+ * This file contains the generic target engine Split CDB related functions.
+ *
+ * Copyright (c) 2004-2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_scdb.h"
+
+/*	split_cdb_XX_6():
+ *
+ *      21-bit LBA w/ 8-bit SECTORS
+ */
+void split_cdb_XX_6(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	cdb[1] = (lba >> 16) & 0x1f;
+	cdb[2] = (lba >> 8) & 0xff;
+	cdb[3] = lba & 0xff;
+	cdb[4] = *sectors & 0xff;
+}
+
+/*	split_cdb_XX_10():
+ *
+ *	32-bit LBA w/ 16-bit SECTORS
+ */
+void split_cdb_XX_10(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be32(lba, &cdb[2]);
+	put_unaligned_be16(*sectors, &cdb[7]);
+}
+
+/*	split_cdb_XX_12():
+ *
+ *	32-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_12(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be32(lba, &cdb[2]);
+	put_unaligned_be32(*sectors, &cdb[6]);
+}
+
+/*	split_cdb_XX_16():
+ *
+ *	64-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_16(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be64(lba, &cdb[2]);
+	put_unaligned_be32(*sectors, &cdb[10]);
+}
+
+/*
+ *	split_cdb_XX_32():
+ *
+ * 	64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
+ */
+void split_cdb_XX_32(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be64(lba, &cdb[12]);
+	put_unaligned_be32(*sectors, &cdb[28]);
+}
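+
+/*
+ * Illustrative usage sketch (values are hypothetical): packing
+ * LBA 0x12345678 and a transfer length of 8 sectors into a WRITE_10
+ * CDB places the big-endian LBA in bytes 2-5 and the sector count
+ * in bytes 7-8:
+ *
+ *	unsigned char cdb[10] = { WRITE_10, };
+ *	u32 sectors = 8;
+ *
+ *	split_cdb_XX_10(0x12345678, &sectors, cdb);
+ *	cdb[2..5] == { 0x12, 0x34, 0x56, 0x78 }, cdb[7..8] == { 0x00, 0x08 }
+ */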
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
new file mode 100644
index 0000000..98cd1c0
--- /dev/null
+++ b/drivers/target/target_core_scdb.h
@@ -0,0 +1,10 @@
+#ifndef TARGET_CORE_SCDB_H
+#define TARGET_CORE_SCDB_H
+
+extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+
+#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 0000000..158cecb
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,404 @@
+/*******************************************************************************
+ * Filename:  target_core_tmr.c
+ *
+ * This file contains SPC-3 task management infrastructure
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+#define DEBUG_LUN_RESET
+#ifdef DEBUG_LUN_RESET
+#define DEBUG_LR(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_LR(x...)
+#endif
+
+struct se_tmr_req *core_tmr_alloc_req(
+	struct se_cmd *se_cmd,
+	void *fabric_tmr_ptr,
+	u8 function)
+{
+	struct se_tmr_req *tmr;
+
+	tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+	if (!(tmr)) {
+		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tmr->task_cmd = se_cmd;
+	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
+	tmr->function = function;
+	INIT_LIST_HEAD(&tmr->tmr_list);
+
+	return tmr;
+}
+EXPORT_SYMBOL(core_tmr_alloc_req);
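+
+/*
+ * Illustrative fabric-side usage sketch; the TMR_LUN_RESET function
+ * code name is hypothetical and stands in for whatever value the
+ * calling fabric module defines:
+ *
+ *	struct se_tmr_req *tmr;
+ *
+ *	tmr = core_tmr_alloc_req(se_cmd, fabric_tmr, TMR_LUN_RESET);
+ *	if (IS_ERR(tmr))
+ *		return PTR_ERR(tmr);
+ *	se_cmd->se_tmr_req = tmr;
+ */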
+
+void core_tmr_release_req(
+	struct se_tmr_req *tmr)
+{
+	struct se_device *dev = tmr->tmr_dev;
+
+	spin_lock(&dev->se_tmr_lock);
+	list_del(&tmr->tmr_list);
+	kmem_cache_free(se_tmr_req_cache, tmr);
+	spin_unlock(&dev->se_tmr_lock);
+}
+
+static void core_tmr_handle_tas_abort(
+	struct se_node_acl *tmr_nacl,
+	struct se_cmd *cmd,
+	int tas,
+	int fe_count)
+{
+	if (!(fe_count)) {
+		transport_cmd_finish_abort(cmd, 1);
+		return;
+	}
+	/*
+	 * TASK ABORTED status (TAS) bit support
+	 */
+	if (((tmr_nacl != NULL) &&
+	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+		transport_send_task_abort(cmd);
+
+	transport_cmd_finish_abort(cmd, 0);
+}
+
+int core_tmr_lun_reset(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *prout_cmd)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr, *qr_tmp;
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	struct se_queue_obj *qobj = dev->dev_queue_obj;
+	struct se_tmr_req *tmr_p, *tmr_pp;
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int fe_count, state, tas;
+	/*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
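+	/*
+	 * Worked example: with TAS set to one, a command received on
+	 * I_T nexus A that is aborted by a LUN_RESET arriving on I_T
+	 * nexus B is completed back to A with TASK ABORTED status;
+	 * with TAS set to zero the same command is terminated silently.
+	 */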
+	tas = DEV_ATTRIB(dev)->emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				TPG_TFO(tmr_tpg)->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		TRANSPORT(dev)->name, tas);
+	/*
+	 * Release all pending and outgoing TMRs aside from the received
+	 * LUN_RESET tmr..
+	 */
+	spin_lock(&dev->se_tmr_lock);
+	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+		/*
+		 * Allow the received TMR to return with FUNCTION_COMPLETE.
+		 */
+		if (tmr && (tmr_p == tmr))
+			continue;
+
+		cmd = tmr_p->task_cmd;
+		if (!(cmd)) {
+			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+			continue;
+		}
+		/*
+		 * If this function was called with a valid pr_res_key
+		 * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service
+		 * action), skip TMRs whose registration key does not match.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		spin_unlock(&dev->se_tmr_lock);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+			spin_lock(&dev->se_tmr_lock);
+			continue;
+		}
+		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+			spin_lock(&dev->se_tmr_lock);
+			continue;
+		}
+		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+			" Response: 0x%02x, t_state: %d\n",
+			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+			tmr_p->function, tmr_p->response, cmd->t_state);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_cmd_finish_abort_tmr(cmd);
+		spin_lock(&dev->se_tmr_lock);
+	}
+	spin_unlock(&dev->se_tmr_lock);
+	/*
+	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
+	 * for TMR LUN_RESET:
+	 *
+	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
+	 * other than the one that caused the SCSI device condition is
+	 * completed with TASK ABORTED status, if the TAS bit is set to one in
+	 * the Control mode page (see SPC-4). "No" indicates that no status is
+	 * returned for aborted commands.
+	 *
+	 * d) If the logical unit reset is caused by a particular I_T nexus
+	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
+	 * (TASK_ABORTED status) applies.
+	 *
+	 * Otherwise (e.g., if triggered by a hard reset), "no"
+	 * (no TASK_ABORTED SAM status) applies.
+	 *
+	 * Note that this seems to be independent of TAS (Task Aborted Status)
+	 * in the Control Mode Page.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
+				t_state_list) {
+		if (!(TASK_CMD(task))) {
+			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+			continue;
+		}
+		cmd = TASK_CMD(task);
+
+		if (!T_TASK(cmd)) {
+			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+				" %p ITT: 0x%08x\n", task, cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+			continue;
+		}
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		list_del(&task->t_state_list);
+		atomic_set(&task->task_state_active, 0);
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
+			"def_t_state: %d/%d cdb: 0x%02x\n",
+			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+			CMD_TFO(cmd)->get_task_tag(cmd), 0,
+			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+			cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
+		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+			" t_task_cdbs: %d t_task_cdbs_left: %d"
+			" t_task_cdbs_sent: %d -- t_transport_active: %d"
+			" t_transport_stop: %d t_transport_sent: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
+			T_TASK(cmd)->t_task_cdbs,
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+			atomic_read(&T_TASK(cmd)->t_transport_active),
+			atomic_read(&T_TASK(cmd)->t_transport_stop),
+			atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+				" for dev: %p\n", task, dev);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+				" dev: %p\n", task, dev);
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		}
+		__transport_stop_task_timer(task, &flags);
+
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+			spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+				" t_task_cdbs_ex_left: %d\n", task, dev,
+				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+
+		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+				" task: %p, t_fe_count: %d dev: %p\n", task,
+				fe_count, dev);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+						flags);
+			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	/*
+	 * Release all commands remaining in the struct se_device cmd queue.
+	 *
+	 * This follows the same logic as above for the struct se_device
+	 * struct se_task state list, where commands are returned with
+	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
+	 * reference, otherwise the struct se_cmd is released.
+	 */
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
+		cmd = (struct se_cmd *)qr->cmd;
+		if (!(cmd)) {
+			/*
+			 * Skip these for non-PREEMPT_AND_ABORT usage.
+			 */
+			if (preempt_and_abort_list != NULL)
+				continue;
+
+			atomic_dec(&qobj->queue_cnt);
+			list_del(&qr->qr_list);
+			kfree(qr);
+			continue;
+		}
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+		atomic_dec(&qobj->queue_cnt);
+		list_del(&qr->qr_list);
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+		state = qr->state;
+		kfree(qr);
+
+		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
+			"Preempt" : "", cmd, state,
+			atomic_read(&T_TASK(cmd)->t_fe_count));
+		/*
+		 * Signal that the command has failed via cmd->se_cmd_flags,
+		 * and call TFO->new_cmd_failure() to wakeup any fabric
+		 * dependent code used to wait for unsolicited data out
+		 * allocation to complete.  The fabric module is expected
+		 * to dump any remaining unsolicited data out for the aborted
+		 * command at this point.
+		 */
+		transport_new_cmd_failure(cmd);
+
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
+				atomic_read(&T_TASK(cmd)->t_fe_count));
+		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+	/*
+	 * Clear any legacy SPC-2 reservation when called during
+	 * LOGICAL UNIT RESET
+	 */
+	if (!(preempt_and_abort_list) &&
+	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+		spin_lock(&dev->dev_reservation_lock);
+		dev->dev_reserved_node_acl = NULL;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+		spin_unlock(&dev->dev_reservation_lock);
+		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+	}
+
+	spin_lock(&dev->stats_lock);
+	dev->num_resets++;
+	spin_unlock(&dev->stats_lock);
+
+	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+			(preempt_and_abort_list) ? "Preempt" : "TMR",
+			TRANSPORT(dev)->name);
+	return 0;
+}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 0000000..abfa81a
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,826 @@
+/*******************************************************************************
+ * Filename:  target_core_tpg.c
+ *
+ * This file contains generic Target Portal Group related functions.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_hba.h"
+
+/*	core_clear_initiator_node_from_tpg():
+ *
+ *
+ */
+static void core_clear_initiator_node_from_tpg(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	int i;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_lun_acl *acl, *acl_tmp;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			printk(KERN_ERR "%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+
+		lun = deve->se_lun;
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+		/*
+		 * Locate the struct se_lun_acl matching this initiator and
+		 * mapped LUN; track the match in a separate pointer so that
+		 * a full walk without a match leaves acl NULL.
+		 */
+		spin_lock(&lun->lun_acl_lock);
+		acl = NULL;
+		list_for_each_entry(acl_tmp, &lun->lun_acl_list, lacl_list) {
+			if (!strcmp(acl_tmp->initiatorname,
+					nacl->initiatorname) &&
+			    (acl_tmp->mapped_lun == deve->mapped_lun)) {
+				acl = acl_tmp;
+				break;
+			}
+		}
+
+		if (!acl) {
+			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+				" mapped_lun: %u\n", nacl->initiatorname,
+				deve->mapped_lun);
+			spin_unlock(&lun->lun_acl_lock);
+			spin_lock_irq(&nacl->device_list_lock);
+			continue;
+		}
+
+		list_del(&acl->lacl_list);
+		spin_unlock(&lun->lun_acl_lock);
+
+		spin_lock_irq(&nacl->device_list_lock);
+		kfree(acl);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+}
+
+/*	__core_tpg_get_initiator_node_acl():
+ *
+ *	Must be called with spin_lock_bh(&tpg->acl_node_lock) held.
+ */
+struct se_node_acl *__core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	const char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!(strcmp(acl->initiatorname, initiatorname)))
+			return acl;
+	}
+
+	return NULL;
+}
+
+/*	core_tpg_get_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!(strcmp(acl->initiatorname, initiatorname)) &&
+		   (!(acl->dynamic_node_acl))) {
+			spin_unlock_bh(&tpg->acl_node_lock);
+			return acl;
+		}
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return NULL;
+}
+
+/*	core_tpg_add_node_to_devs():
+ *
+ *
+ */
+void core_tpg_add_node_to_devs(
+	struct se_node_acl *acl,
+	struct se_portal_group *tpg)
+{
+	int i = 0;
+	u32 lun_access = 0;
+	struct se_lun *lun;
+	struct se_device *dev;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &tpg->tpg_lun_list[i];
+		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+
+		dev = lun->lun_se_dev;
+		/*
+		 * By default in the LIO-Target $FABRIC_MOD,
+		 * demo_mode_write_protect is ON, i.e. LUNs are READ-ONLY;
+		 */
+		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+			if (dev->dev_flags & DF_READ_ONLY)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			/*
+			 * Allow only optical drives to issue R/W in default RO
+			 * demo mode.
+			 */
+			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		}
+
+		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+			" access for LUN in Demo Mode\n",
+			TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+			"READ-WRITE" : "READ-ONLY");
+
+		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
+				lun_access, acl, tpg, 1);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+
+/*      core_set_queue_depth_for_node():
+ *
+ *
+ */
+static int core_set_queue_depth_for_node(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl)
+{
+	if (!acl->queue_depth) {
+		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
+			"defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+			acl->initiatorname);
+		acl->queue_depth = 1;
+	}
+
+	return 0;
+}
+
+/*      core_create_device_list_for_node():
+ *
+ *
+ */
+static int core_create_device_list_for_node(struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+	int i;
+
+	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
+				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+	if (!(nacl->device_list)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+			" struct se_node_acl->device_list\n");
+		return -1;
+	}
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		atomic_set(&deve->ua_count, 0);
+		atomic_set(&deve->pr_ref_count, 0);
+		spin_lock_init(&deve->ua_lock);
+		INIT_LIST_HEAD(&deve->alua_port_list);
+		INIT_LIST_HEAD(&deve->ua_list);
+	}
+
+	return 0;
+}
+
+/*	core_tpg_check_initiator_node_acl()
+ *
+ *
+ */
+struct se_node_acl *core_tpg_check_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if ((acl))
+		return acl;
+
+	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+		return NULL;
+
+	acl =  TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
+	if (!(acl))
+		return NULL;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	atomic_set(&acl->mib_ref_count, 0);
+	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+	acl->dynamic_node_acl = 1;
+
+	TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+
+	core_tpg_add_node_to_devs(acl, tpg);
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
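+
+/*
+ * Illustrative fabric login-path sketch (hypothetical caller):
+ *
+ *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
+ *	if (!se_nacl)
+ *		return -EACCES;
+ *
+ * A NULL return means no explicit ACL exists and demo mode is disabled.
+ */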
+
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
+{
+	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
+		cpu_relax();
+}
+
+void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
+{
+	while (atomic_read(&nacl->mib_ref_count) != 0)
+		cpu_relax();
+}
+
+void core_tpg_clear_object_luns(struct se_portal_group *tpg)
+{
+	int i, ret;
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &tpg->tpg_lun_list[i];
+
+		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
+		    (lun->lun_se_dev == NULL))
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+EXPORT_SYMBOL(core_tpg_clear_object_luns);
+
+/*	core_tpg_add_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_add_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *se_nacl,
+	const char *initiatorname,
+	u32 queue_depth)
+{
+	struct se_node_acl *acl = NULL;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if ((acl)) {
+		if (acl->dynamic_node_acl) {
+			acl->dynamic_node_acl = 0;
+			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
+				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+			spin_unlock_bh(&tpg->acl_node_lock);
+			/*
+			 * Release the locally allocated struct se_node_acl
+			 * because core_tpg_add_initiator_node_acl() returned
+			 * a pointer to an existing demo mode node ACL.
+			 */
+			if (se_nacl)
+				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+							se_nacl);
+			goto done;
+		}
+
+		printk(KERN_ERR "ACL entry for %s Initiator"
+			" Node %s already exists for TPG %u, ignoring"
+			" request.\n",  TPG_TFO(tpg)->get_fabric_name(),
+			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return ERR_PTR(-EEXIST);
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	if (!(se_nacl)) {
+		printk("struct se_node_acl pointer is NULL\n");
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * For v4.x logic the struct se_node_acl is hanging off a
+	 * fabric-dependent structure allocated via
+	 * struct target_core_fabric_ops->fabric_make_nodeacl()
+	 */
+	acl = se_nacl;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	acl->queue_depth = queue_depth;
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+
+	TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+done:
+	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
+
+/*	core_tpg_del_initiator_node_acl():
+ *
+ *
+ */
+int core_tpg_del_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl,
+	int force)
+{
+	struct se_session *sess, *sess_tmp;
+	int dynamic_acl = 0;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	if (acl->dynamic_node_acl) {
+		acl->dynamic_node_acl = 0;
+		dynamic_acl = 1;
+	}
+	list_del(&acl->acl_list);
+	tpg->num_node_acls--;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	spin_lock_bh(&tpg->session_lock);
+	list_for_each_entry_safe(sess, sess_tmp,
+				&tpg->tpg_sess_list, sess_list) {
+		if (sess->se_node_acl != acl)
+			continue;
+		/*
+		 * Determine if the session needs to be closed by our context.
+		 */
+		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+			continue;
+
+		spin_unlock_bh(&tpg->session_lock);
+		/*
+		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+		 * forcefully shutdown the $FABRIC_MOD session/nexus.
+		 */
+		TPG_TFO(tpg)->close_session(sess);
+
+		spin_lock_bh(&tpg->session_lock);
+	}
+	spin_unlock_bh(&tpg->session_lock);
+
+	core_tpg_wait_for_nacl_pr_ref(acl);
+	core_tpg_wait_for_mib_ref(acl);
+	core_clear_initiator_node_from_tpg(acl, tpg);
+	core_free_device_list_for_node(acl, tpg);
+
+	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
+
+/*	core_tpg_set_initiator_node_queue_depth():
+ *
+ *
+ */
+int core_tpg_set_initiator_node_queue_depth(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname,
+	u32 queue_depth,
+	int force)
+{
+	struct se_session *sess, *init_sess = NULL;
+	struct se_node_acl *acl;
+	int dynamic_acl = 0;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (!(acl)) {
+		printk(KERN_ERR "Access Control List entry for %s Initiator"
+			" Node %s does not exists for TPG %hu, ignoring"
+			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
+			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return -ENODEV;
+	}
+	if (acl->dynamic_node_acl) {
+		acl->dynamic_node_acl = 0;
+		dynamic_acl = 1;
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	spin_lock_bh(&tpg->session_lock);
+	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
+		if (sess->se_node_acl != acl)
+			continue;
+
+		if (!force) {
+			printk(KERN_ERR "Unable to change queue depth for %s"
+				" Initiator Node: %s while session is"
+				" operational.  To forcefully change the queue"
+				" depth and force session reinstatement"
+				" use the \"force=1\" parameter.\n",
+				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+			spin_unlock_bh(&tpg->session_lock);
+
+			spin_lock_bh(&tpg->acl_node_lock);
+			if (dynamic_acl)
+				acl->dynamic_node_acl = 1;
+			spin_unlock_bh(&tpg->acl_node_lock);
+			return -EEXIST;
+		}
+		/*
+		 * Determine if the session needs to be closed by our context.
+		 */
+		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+			continue;
+
+		init_sess = sess;
+		break;
+	}
+
+	/*
+	 * User has requested to change the queue depth for an Initiator Node.
+	 * Change the value in the Node's struct se_node_acl, and call
+	 * core_set_queue_depth_for_node() to add the requested queue depth.
+	 *
+	 * Finally call TPG_TFO(tpg)->close_session() to force session
+	 * reinstatement to occur if there is an active session for the
+	 * $FABRIC_MOD Initiator Node in question.
+	 */
+	acl->queue_depth = queue_depth;
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		spin_unlock_bh(&tpg->session_lock);
+		/*
+		 * Force session reinstatement if
+		 * core_set_queue_depth_for_node() failed, because we assume
+		 * the $FABRIC_MOD has already set the session reinstatement
+		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
+		 */
+		if (init_sess)
+			TPG_TFO(tpg)->close_session(init_sess);
+
+		spin_lock_bh(&tpg->acl_node_lock);
+		if (dynamic_acl)
+			acl->dynamic_node_acl = 1;
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&tpg->session_lock);
+	/*
+	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+	 * forcefully shutdown the $FABRIC_MOD session/nexus.
+	 */
+	if (init_sess)
+		TPG_TFO(tpg)->close_session(init_sess);
+
+	printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
+		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	if (dynamic_acl)
+		acl->dynamic_node_acl = 1;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+
+static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	/* Set in core_dev_setup_virtual_lun0() */
+	struct se_device *dev = se_global->g_lun0_dev;
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	int ret;
+
+	lun->unpacked_lun = 0;
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	atomic_set(&lun->lun_acl_count, 0);
+	init_completion(&lun->lun_shutdown_comp);
+	INIT_LIST_HEAD(&lun->lun_acl_list);
+	INIT_LIST_HEAD(&lun->lun_cmd_list);
+	spin_lock_init(&lun->lun_acl_lock);
+	spin_lock_init(&lun->lun_cmd_lock);
+	spin_lock_init(&lun->lun_sep_lock);
+
+	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+	if (ret < 0)
+		return -1;
+
+	return 0;
+}
+
+static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+
+	core_tpg_post_dellun(se_tpg, lun);
+}
+
+int core_tpg_register(
+	struct target_core_fabric_ops *tfo,
+	struct se_wwn *se_wwn,
+	struct se_portal_group *se_tpg,
+	void *tpg_fabric_ptr,
+	int se_tpg_type)
+{
+	struct se_lun *lun;
+	u32 i;
+
+	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
+				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+	if (!(se_tpg->tpg_lun_list)) {
+		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+				"tpg_lun_list\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &se_tpg->tpg_lun_list[i];
+		lun->unpacked_lun = i;
+		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+		atomic_set(&lun->lun_acl_count, 0);
+		init_completion(&lun->lun_shutdown_comp);
+		INIT_LIST_HEAD(&lun->lun_acl_list);
+		INIT_LIST_HEAD(&lun->lun_cmd_list);
+		spin_lock_init(&lun->lun_acl_lock);
+		spin_lock_init(&lun->lun_cmd_lock);
+		spin_lock_init(&lun->lun_sep_lock);
+	}
+
+	se_tpg->se_tpg_type = se_tpg_type;
+	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
+	se_tpg->se_tpg_tfo = tfo;
+	se_tpg->se_tpg_wwn = se_wwn;
+	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
+	INIT_LIST_HEAD(&se_tpg->acl_node_list);
+	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
+	spin_lock_init(&se_tpg->acl_node_lock);
+	spin_lock_init(&se_tpg->session_lock);
+	spin_lock_init(&se_tpg->tpg_lun_lock);
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
+		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
+			/* se_tpg is embedded in a fabric structure */
+			kfree(se_tpg->tpg_lun_list);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_bh(&se_global->se_tpg_lock);
+	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
+	spin_unlock_bh(&se_global->se_tpg_lock);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
+		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_register);
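+
+/*
+ * Illustrative usage sketch; the "demo" names are hypothetical.  A
+ * fabric module registers its TPG once at configfs group creation
+ * and deregisters it on release:
+ *
+ *	ret = core_tpg_register(&demo_fabric_ops, se_wwn,
+ *			&demo_tpg->se_tpg, demo_tpg,
+ *			TRANSPORT_TPG_TYPE_NORMAL);
+ *	if (ret < 0)
+ *		return ERR_PTR(ret);
+ *	...
+ *	core_tpg_deregister(&demo_tpg->se_tpg);
+ */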
+
+int core_tpg_deregister(struct se_portal_group *se_tpg)
+{
+	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+		" for endpoint: %s Portal Tag %u\n",
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
+		TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
+		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+	spin_lock_bh(&se_global->se_tpg_lock);
+	list_del(&se_tpg->se_tpg_list);
+	spin_unlock_bh(&se_global->se_tpg_lock);
+
+	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
+		cpu_relax();
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
+		core_tpg_release_virtual_lun0(se_tpg);
+
+	se_tpg->se_tpg_fabric_ptr = NULL;
+	kfree(se_tpg->tpg_lun_list);
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_deregister);
+
+struct se_lun *core_tpg_pre_addlun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			TPG_TFO(tpg)->get_fabric_name(),
+			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+			" on %s Target Portal Group: %u, ignoring request.\n",
+			unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_addlun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 lun_access,
+	void *lun_ptr)
+{
+	if (core_dev_export(lun_ptr, tpg, lun) < 0)
+		return -1;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_access = lun_access;
+	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
+
+static void core_tpg_shutdown_lun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_clear_lun_from_tpg(lun, tpg);
+	transport_clear_lun_from_sessions(lun);
+}
+
+struct se_lun *core_tpg_pre_dellun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun,
+	int *ret)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %u, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-ENODEV);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_dellun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_tpg_shutdown_lun(tpg, lun);
+
+	core_dev_unexport(lun->lun_se_dev, tpg, lun);
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 0000000..28b6292ff
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,6134 @@
+/*******************************************************************************
+ * Filename:  target_core_transport.c
+ *
+ * This file contains the Generic Target Engine Core.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/cdrom.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_scdb.h"
+#include "target_core_ua.h"
+
+/* #define DEBUG_CDB_HANDLER */
+#ifdef DEBUG_CDB_HANDLER
+#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CDB_H(x...)
+#endif
+
+/* #define DEBUG_CMD_MAP */
+#ifdef DEBUG_CMD_MAP
+#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CMD_M(x...)
+#endif
+
+/* #define DEBUG_MEM_ALLOC */
+#ifdef DEBUG_MEM_ALLOC
+#define DEBUG_MEM(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM(x...)
+#endif
+
+/* #define DEBUG_MEM2_ALLOC */
+#ifdef DEBUG_MEM2_ALLOC
+#define DEBUG_MEM2(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM2(x...)
+#endif
+
+/* #define DEBUG_SG_CALC */
+#ifdef DEBUG_SG_CALC
+#define DEBUG_SC(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SC(x...)
+#endif
+
+/* #define DEBUG_SE_OBJ */
+#ifdef DEBUG_SE_OBJ
+#define DEBUG_SO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SO(x...)
+#endif
+
+/* #define DEBUG_CMD_VOL */
+#ifdef DEBUG_CMD_VOL
+#define DEBUG_VOL(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_VOL(x...)
+#endif
+
+/* #define DEBUG_CMD_STOP */
+#ifdef DEBUG_CMD_STOP
+#define DEBUG_CS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CS(x...)
+#endif
+
+/* #define DEBUG_PASSTHROUGH */
+#ifdef DEBUG_PASSTHROUGH
+#define DEBUG_PT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_PT(x...)
+#endif
+
+/* #define DEBUG_TASK_STOP */
+#ifdef DEBUG_TASK_STOP
+#define DEBUG_TS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TS(x...)
+#endif
+
+/* #define DEBUG_TRANSPORT_STOP */
+#ifdef DEBUG_TRANSPORT_STOP
+#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TRANSPORT_S(x...)
+#endif
+
+/* #define DEBUG_TASK_FAILURE */
+#ifdef DEBUG_TASK_FAILURE
+#define DEBUG_TF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TF(x...)
+#endif
+
+/* #define DEBUG_DEV_OFFLINE */
+#ifdef DEBUG_DEV_OFFLINE
+#define DEBUG_DO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_DO(x...)
+#endif
+
+/* #define DEBUG_TASK_STATE */
+#ifdef DEBUG_TASK_STATE
+#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TSTATE(x...)
+#endif
+
+/* #define DEBUG_STATUS_THR */
+#ifdef DEBUG_STATUS_THR
+#define DEBUG_ST(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_ST(x...)
+#endif
+
+/* #define DEBUG_TASK_TIMEOUT */
+#ifdef DEBUG_TASK_TIMEOUT
+#define DEBUG_TT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TT(x...)
+#endif
+
+/* #define DEBUG_GENERIC_REQUEST_FAILURE */
+#ifdef DEBUG_GENERIC_REQUEST_FAILURE
+#define DEBUG_GRF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_GRF(x...)
+#endif
+
+/* #define DEBUG_SAM_TASK_ATTRS */
+#ifdef DEBUG_SAM_TASK_ATTRS
+#define DEBUG_STA(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_STA(x...)
+#endif
+
+struct se_global *se_global;
+
+static struct kmem_cache *se_cmd_cache;
+static struct kmem_cache *se_sess_cache;
+struct kmem_cache *se_tmr_req_cache;
+struct kmem_cache *se_ua_cache;
+struct kmem_cache *se_mem_cache;
+struct kmem_cache *t10_pr_reg_cache;
+struct kmem_cache *t10_alua_lu_gp_cache;
+struct kmem_cache *t10_alua_lu_gp_mem_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+/* Used for transport_dev_get_map_*() */
+typedef int (*map_func_t)(struct se_task *, u32);
+
+static int transport_generic_write_pending(struct se_cmd *);
+static int transport_processing_thread(void *);
+static int __transport_execute_tasks(struct se_device *dev);
+static void transport_complete_task_attr(struct se_cmd *cmd);
+static void transport_direct_request_timeout(struct se_cmd *cmd);
+static void transport_free_dev_tasks(struct se_cmd *cmd);
+static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
+		unsigned long long starting_lba, u32 sectors,
+		enum dma_data_direction data_direction,
+		struct list_head *mem_list, int set_counts);
+static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
+		u32 dma_size);
+static int transport_generic_remove(struct se_cmd *cmd,
+		int release_to_pool, int session_reinstatement);
+static int transport_get_sectors(struct se_cmd *cmd);
+static struct list_head *transport_init_se_mem_list(void);
+static int transport_map_sg_to_mem(struct se_cmd *cmd,
+		struct list_head *se_mem_list, void *in_mem,
+		u32 *se_mem_cnt);
+static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
+		unsigned char *dst, struct list_head *se_mem_list);
+static void transport_release_fe_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+		struct se_queue_obj *qobj);
+static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
+static void transport_stop_all_task_timers(struct se_cmd *cmd);
+
+int transport_emulate_control_cdb(struct se_task *task);
+
+int init_se_global(void)
+{
+	struct se_global *global;
+
+	global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
+	if (!(global)) {
+		printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
+		return -1;
+	}
+
+	INIT_LIST_HEAD(&global->g_lu_gps_list);
+	INIT_LIST_HEAD(&global->g_se_tpg_list);
+	INIT_LIST_HEAD(&global->g_hba_list);
+	INIT_LIST_HEAD(&global->g_se_dev_list);
+	spin_lock_init(&global->g_device_lock);
+	spin_lock_init(&global->hba_lock);
+	spin_lock_init(&global->se_tpg_lock);
+	spin_lock_init(&global->lu_gps_lock);
+	spin_lock_init(&global->plugin_class_lock);
+
+	se_cmd_cache = kmem_cache_create("se_cmd_cache",
+			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
+	if (!(se_cmd_cache)) {
+		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+		goto out;
+	}
+	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
+			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
+			0, NULL);
+	if (!(se_tmr_req_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+				" failed\n");
+		goto out;
+	}
+	se_sess_cache = kmem_cache_create("se_sess_cache",
+			sizeof(struct se_session), __alignof__(struct se_session),
+			0, NULL);
+	if (!(se_sess_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_session"
+				" failed\n");
+		goto out;
+	}
+	se_ua_cache = kmem_cache_create("se_ua_cache",
+			sizeof(struct se_ua), __alignof__(struct se_ua),
+			0, NULL);
+	if (!(se_ua_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+		goto out;
+	}
+	se_mem_cache = kmem_cache_create("se_mem_cache",
+			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
+	if (!(se_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+		goto out;
+	}
+	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
+			sizeof(struct t10_pr_registration),
+			__alignof__(struct t10_pr_registration), 0, NULL);
+	if (!(t10_pr_reg_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+				" failed\n");
+		goto out;
+	}
+	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
+			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
+			0, NULL);
+	if (!(t10_alua_lu_gp_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+				" failed\n");
+		goto out;
+	}
+	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
+			sizeof(struct t10_alua_lu_gp_member),
+			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
+	if (!(t10_alua_lu_gp_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+				"cache failed\n");
+		goto out;
+	}
+	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
+			sizeof(struct t10_alua_tg_pt_gp),
+			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
+	if (!(t10_alua_tg_pt_gp_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"cache failed\n");
+		goto out;
+	}
+	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
+			"t10_alua_tg_pt_gp_mem_cache",
+			sizeof(struct t10_alua_tg_pt_gp_member),
+			__alignof__(struct t10_alua_tg_pt_gp_member),
+			0, NULL);
+	if (!(t10_alua_tg_pt_gp_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"mem_t failed\n");
+		goto out;
+	}
+
+	se_global = global;
+
+	return 0;
+out:
+	if (se_cmd_cache)
+		kmem_cache_destroy(se_cmd_cache);
+	if (se_tmr_req_cache)
+		kmem_cache_destroy(se_tmr_req_cache);
+	if (se_sess_cache)
+		kmem_cache_destroy(se_sess_cache);
+	if (se_ua_cache)
+		kmem_cache_destroy(se_ua_cache);
+	if (se_mem_cache)
+		kmem_cache_destroy(se_mem_cache);
+	if (t10_pr_reg_cache)
+		kmem_cache_destroy(t10_pr_reg_cache);
+	if (t10_alua_lu_gp_cache)
+		kmem_cache_destroy(t10_alua_lu_gp_cache);
+	if (t10_alua_lu_gp_mem_cache)
+		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	if (t10_alua_tg_pt_gp_cache)
+		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	if (t10_alua_tg_pt_gp_mem_cache)
+		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kfree(global);
+	return -1;
+}
+
+void release_se_global(void)
+{
+	struct se_global *global;
+
+	global = se_global;
+	if (!(global))
+		return;
+
+	kmem_cache_destroy(se_cmd_cache);
+	kmem_cache_destroy(se_tmr_req_cache);
+	kmem_cache_destroy(se_sess_cache);
+	kmem_cache_destroy(se_ua_cache);
+	kmem_cache_destroy(se_mem_cache);
+	kmem_cache_destroy(t10_pr_reg_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kfree(global);
+
+	se_global = NULL;
+}
+
+void transport_init_queue_obj(struct se_queue_obj *qobj)
+{
+	atomic_set(&qobj->queue_cnt, 0);
+	INIT_LIST_HEAD(&qobj->qobj_list);
+	init_waitqueue_head(&qobj->thread_wq);
+	spin_lock_init(&qobj->cmd_queue_lock);
+}
+EXPORT_SYMBOL(transport_init_queue_obj);
+
+static int transport_subsystem_reqmods(void)
+{
+	int ret;
+
+	ret = request_module("target_core_iblock");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_iblock\n");
+
+	ret = request_module("target_core_file");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_file\n");
+
+	ret = request_module("target_core_pscsi");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_pscsi\n");
+
+	ret = request_module("target_core_stgt");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_stgt\n");
+
+	return 0;
+}
+
+int transport_subsystem_check_init(void)
+{
+	if (se_global->g_sub_api_initialized)
+		return 0;
+	/*
+	 * Request the loading of known TCM subsystem plugins.
+	 */
+	if (transport_subsystem_reqmods() < 0)
+		return -1;
+
+	se_global->g_sub_api_initialized = 1;
+	return 0;
+}
+
+struct se_session *transport_init_session(void)
+{
+	struct se_session *se_sess;
+
+	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+	if (!(se_sess)) {
+		printk(KERN_ERR "Unable to allocate struct se_session from"
+				" se_sess_cache\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&se_sess->sess_list);
+	INIT_LIST_HEAD(&se_sess->sess_acl_list);
+	atomic_set(&se_sess->mib_ref_count, 0);
+
+	return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/*
+ * Called with spin_lock_bh(&se_tpg->session_lock) held.
+ */
+void __transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	unsigned char buf[PR_REG_ISID_LEN];
+
+	se_sess->se_tpg = se_tpg;
+	se_sess->fabric_sess_ptr = fabric_sess_ptr;
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate the active
+	 * struct se_session.
+	 *
+	 * Only set for struct se_session's that will actually be moving I/O,
+	 * e.g. *NOT* discovery sessions.
+	 */
+	if (se_nacl) {
+		/*
+		 * If the fabric module supports an ISID based TransportID,
+		 * save this value in binary from the fabric I_T Nexus now.
+		 */
+		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+			memset(&buf[0], 0, PR_REG_ISID_LEN);
+			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+					&buf[0], PR_REG_ISID_LEN);
+			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+		}
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		/*
+		 * The se_nacl->nacl_sess pointer will be set to the
+		 * last active I_T Nexus for each struct se_node_acl.
+		 */
+		se_nacl->nacl_sess = se_sess;
+
+		list_add_tail(&se_sess->sess_acl_list,
+			      &se_nacl->acl_sess_list);
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+}
+EXPORT_SYMBOL(__transport_register_session);
+
+void transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	spin_lock_bh(&se_tpg->session_lock);
+	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
+	spin_unlock_bh(&se_tpg->session_lock);
+}
+EXPORT_SYMBOL(transport_register_session);
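+
+/*
+ * Illustrative session setup sketch for a fabric module (hypothetical
+ * caller; error handling elided):
+ *
+ *	se_sess = transport_init_session();
+ *	if (IS_ERR(se_sess))
+ *		return PTR_ERR(se_sess);
+ *
+ *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
+ *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess);
+ */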
+
+void transport_deregister_session_configfs(struct se_session *se_sess)
+{
+	struct se_node_acl *se_nacl;
+
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if ((se_nacl)) {
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		list_del(&se_sess->sess_acl_list);
+		/*
+		 * If the session list is empty, then clear the pointer.
+		 * Otherwise, set the struct se_session pointer from the tail
+		 * element of the per struct se_node_acl active session list.
+		 */
+		if (list_empty(&se_nacl->acl_sess_list))
+			se_nacl->nacl_sess = NULL;
+		else {
+			se_nacl->nacl_sess = container_of(
+					se_nacl->acl_sess_list.prev,
+					struct se_session, sess_acl_list);
+		}
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+}
+EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+void transport_free_session(struct se_session *se_sess)
+{
+	kmem_cache_free(se_sess_cache, se_sess);
+}
+EXPORT_SYMBOL(transport_free_session);
+
+void transport_deregister_session(struct se_session *se_sess)
+{
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+	struct se_node_acl *se_nacl;
+
+	if (!(se_tpg)) {
+		transport_free_session(se_sess);
+		return;
+	}
+	/*
+	 * Wait for possible reference in drivers/target/target_core_mib.c:
+	 * scsi_att_intr_port_seq_show()
+	 */
+	while (atomic_read(&se_sess->mib_ref_count) != 0)
+		cpu_relax();
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_del(&se_sess->sess_list);
+	se_sess->se_tpg = NULL;
+	se_sess->fabric_sess_ptr = NULL;
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	/*
+	 * Determine if we need to do extra work for this initiator node's
+	 * struct se_node_acl if it had been previously dynamically generated.
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if ((se_nacl)) {
+		spin_lock_bh(&se_tpg->acl_node_lock);
+		if (se_nacl->dynamic_node_acl) {
+			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
+					se_tpg))) {
+				list_del(&se_nacl->acl_list);
+				se_tpg->num_node_acls--;
+				spin_unlock_bh(&se_tpg->acl_node_lock);
+
+				core_tpg_wait_for_nacl_pr_ref(se_nacl);
+				core_tpg_wait_for_mib_ref(se_nacl);
+				core_free_device_list_for_node(se_nacl, se_tpg);
+				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+						se_nacl);
+				spin_lock_bh(&se_tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_bh(&se_tpg->acl_node_lock);
+	}
+
+	transport_free_session(se_sess);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+		TPG_TFO(se_tpg)->get_fabric_name());
+}
+EXPORT_SYMBOL(transport_deregister_session);
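+
+/*
+ * Teardown ordering note (sketch): a fabric module that registered with
+ * a non-NULL se_nacl is expected to call
+ * transport_deregister_session_configfs() first to drop the
+ * struct se_node_acl linkage, and then transport_deregister_session()
+ * to drop the struct se_portal_group linkage and free the session.
+ */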
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+{
+	struct se_device *dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	if (!T_TASK(cmd))
+		return;
+
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		dev = task->se_dev;
+		if (!(dev))
+			continue;
+
+		if (atomic_read(&task->task_active))
+			continue;
+
+		if (!(atomic_read(&task->task_state_active)))
+			continue;
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+		list_del(&task->t_state_list);
+		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), dev, task);
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		atomic_set(&task->task_state_active, 0);
+		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+	}
+}
+
+/*	transport_cmd_check_stop():
+ *
+ *	'transport_off = 1' determines if t_transport_active should be cleared.
+ *	'transport_off = 2' determines if task_dev_state should be removed.
+ *
+ *	A non-zero u8 t_state sets cmd->t_state.
+ *	Returns 1 when command is stopped, else 0.
+ */
+static int transport_cmd_check_stop(
+	struct se_cmd *cmd,
+	int transport_off,
+	u8 t_state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * Determine if IOCTL context caller is requesting the stopping of this
+	 * command for LUN shutdown purposes.
+	 */
+	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		cmd->deferred_t_state = cmd->t_state;
+		cmd->t_state = TRANSPORT_DEFERRED_CMD;
+		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&T_TASK(cmd)->transport_lun_stop_comp);
+		return 1;
+	}
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 */
+	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		cmd->deferred_t_state = cmd->t_state;
+		cmd->t_state = TRANSPORT_DEFERRED_CMD;
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+
+		/*
+		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
+		 * to FE.
+		 */
+		if (transport_off == 2)
+			cmd->se_lun = NULL;
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&T_TASK(cmd)->t_transport_stop_comp);
+		return 1;
+	}
+	if (transport_off) {
+		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+		if (transport_off == 2) {
+			transport_all_task_dev_remove_state(cmd);
+			/*
+			 * Clear struct se_cmd->se_lun before the transport_off == 2
+			 * handoff to fabric module.
+			 */
+			cmd->se_lun = NULL;
+			/*
+			 * Some fabric modules like tcm_loop can release
+			 * their internally allocated I/O reference and
+			 * struct se_cmd now.
+			 */
+			if (CMD_TFO(cmd)->check_stop_free != NULL) {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+				CMD_TFO(cmd)->check_stop_free(cmd);
+				return 1;
+			}
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		return 0;
+	} else if (t_state)
+		cmd->t_state = t_state;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return 0;
+}
+
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+	return transport_cmd_check_stop(cmd, 2, 0);
+}
+
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	unsigned long flags;
+
+	if (!lun)
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto check_lun;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_free_dev_tasks(cmd);
+
+check_lun:
+	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
+		list_del(&cmd->se_lun_list);
+		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+#if 0
+		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
+			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+#endif
+	}
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+}
+
+void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+{
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+	transport_lun_remove_cmd(cmd);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return;
+	if (remove)
+		transport_generic_remove(cmd, 0, 0);
+}
+
+void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
+{
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return;
+
+	transport_generic_remove(cmd, 0, 0);
+}
+
+static int transport_add_cmd_to_queue(
+	struct se_cmd *cmd,
+	int t_state)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_queue_obj *qobj = dev->dev_queue_obj;
+	struct se_queue_req *qr;
+	unsigned long flags;
+
+	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
+	if (!(qr)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_queue_req\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&qr->qr_list);
+
+	qr->cmd = (void *)cmd;
+	qr->state = t_state;
+
+	if (t_state) {
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		cmd->t_state = t_state;
+		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	}
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	list_add_tail(&qr->qr_list, &qobj->qobj_list);
+	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	atomic_inc(&qobj->queue_cnt);
+	wake_up_interruptible(&qobj->thread_wq);
+	return 0;
+}
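+
+/*
+ * Commands queued by transport_add_cmd_to_queue() above are drained
+ * asynchronously by the struct se_device processing thread (see
+ * transport_processing_thread()), which is woken via qobj->thread_wq
+ * and dequeues via transport_get_qr_from_queue() below.
+ */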
+
+/*
+ * Called with struct se_queue_obj->cmd_queue_lock held.
+ */
+static struct se_queue_req *
+__transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr = NULL;
+
+	if (list_empty(&qobj->qobj_list))
+		return NULL;
+
+	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+		break;
+
+	if (qr->cmd) {
+		cmd = (struct se_cmd *)qr->cmd;
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+	}
+	list_del(&qr->qr_list);
+	atomic_dec(&qobj->queue_cnt);
+
+	return qr;
+}
+
+static struct se_queue_req *
+transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (list_empty(&qobj->qobj_list)) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return NULL;
+	}
+
+	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+		break;
+
+	if (qr->cmd) {
+		cmd = (struct se_cmd *)qr->cmd;
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+	}
+	list_del(&qr->qr_list);
+	atomic_dec(&qobj->queue_cnt);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	return qr;
+}
+
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+		struct se_queue_obj *qobj)
+{
+	struct se_cmd *q_cmd;
+	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
+		q_cmd = (struct se_cmd *)qr->cmd;
+		if (q_cmd != cmd)
+			continue;
+
+		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
+		atomic_dec(&qobj->queue_cnt);
+		list_del(&qr->qr_list);
+		kfree(qr);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
+		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+	}
+}
+
+/*
+ * Completion function used by TCM subsystem plugins (such as FILEIO)
+ * for queueing up response from struct se_subsystem_api->do_task()
+ */
+void transport_complete_sync_cache(struct se_cmd *cmd, int good)
+{
+	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+				struct se_task, t_list);
+
+	if (good) {
+		cmd->scsi_status = SAM_STAT_GOOD;
+		task->task_scsi_status = GOOD;
+	} else {
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
+		TASK_CMD(task)->transport_error_status =
+					PYX_TRANSPORT_ILLEGAL_REQUEST;
+	}
+
+	transport_complete_task(task, good);
+}
+EXPORT_SYMBOL(transport_complete_sync_cache);
+
+/*	transport_complete_task():
+ *
+ *	Called from interrupt and non interrupt context depending
+ *	on the transport plugin.
+ */
+void transport_complete_task(struct se_task *task, int success)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = task->se_dev;
+	int t_state;
+	unsigned long flags;
+#if 0
+	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+			T_TASK(cmd)->t_task_cdb[0], dev);
+#endif
+	if (dev) {
+		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+		atomic_inc(&dev->depth_left);
+		atomic_inc(&SE_HBA(dev)->left_queue_depth);
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+	}
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&task->task_active, 0);
+
+	/*
+	 * See if any sense data exists, if so set the TASK_SENSE flag.
+	 * Also check for any other post completion work that needs to be
+	 * done by the plugins.
+	 */
+	if (dev && dev->transport->transport_complete) {
+		if (dev->transport->transport_complete(task) != 0) {
+			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+			task->task_sense = 1;
+			success = 1;
+		}
+	}
+
+	/*
+	 * See if we are waiting for outstanding struct se_task
+	 * to complete for an exception condition
+	 */
+	if (atomic_read(&task->task_stop)) {
+		/*
+		 * Decrement T_TASK(cmd)->t_se_count if this task had
+		 * previously thrown its timeout exception handler.
+		 */
+		if (atomic_read(&task->task_timeout)) {
+			atomic_dec(&T_TASK(cmd)->t_se_count);
+			atomic_set(&task->task_timeout, 0);
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&task->task_stop_comp);
+		return;
+	}
+	/*
+	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
+	 * left counter to determine when the struct se_cmd is ready to be queued to
+	 * the processing thread.
+	 */
+	if (atomic_read(&task->task_timeout)) {
+		if (!(atomic_dec_and_test(
+				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+				flags);
+			return;
+		}
+		t_state = TRANSPORT_COMPLETE_TIMEOUT;
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_add_cmd_to_queue(cmd, t_state);
+		return;
+	}
+	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+
+	/*
+	 * Decrement the outstanding t_task_cdbs_left count.  The last
+	 * struct se_task from struct se_cmd will complete itself into the
+	 * device queue depending upon int success.
+	 */
+	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+		if (!success)
+			T_TASK(cmd)->t_tasks_failed = 1;
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	if (!success || T_TASK(cmd)->t_tasks_failed) {
+		t_state = TRANSPORT_COMPLETE_FAILURE;
+		if (!task->task_error_status) {
+			task->task_error_status =
+				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+			cmd->transport_error_status =
+				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+	} else {
+		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+		t_state = TRANSPORT_COMPLETE_OK;
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_add_cmd_to_queue(cmd, t_state);
+}
+EXPORT_SYMBOL(transport_complete_task);
+
+/*
+ * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
+ * struct se_task list is ready to be added to the active execution list
+ * of the struct se_device.
+ *
+ * Called with se_dev_t->execute_task_lock held.
+ */
+static inline int transport_add_task_check_sam_attr(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	/*
+	 * No SAM Task attribute emulation enabled, add to tail of
+	 * execution queue
+	 */
+	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
+		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+		return 0;
+	}
+	/*
+	 * HEAD_OF_QUEUE attribute for received CDB, which means
+	 * the first task that is associated with a struct se_cmd goes to
+	 * head of the struct se_device->execute_task_list, and task_prev
+	 * after that for each subsequent task
+	 */
+	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		list_add(&task->t_execute_list,
+				(task_prev != NULL) ?
+				&task_prev->t_execute_list :
+				&dev->execute_task_list);
+
+		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+				" in execution queue\n",
+				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+		return 1;
+	}
+	/*
+	 * ORDERED, SIMPLE or UNTAGGED attribute tasks that have been
+	 * transitioned from Dormant -> Active state are added to the end
+	 * of the struct se_device->execute_task_list.
+	 */
+	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+	return 0;
+}
+
+/*	__transport_add_task_to_execute_queue():
+ *
+ *	Called with se_dev_t->execute_task_lock held.
+ */
+static void __transport_add_task_to_execute_queue(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	int head_of_queue;
+
+	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
+	atomic_inc(&dev->execute_tasks);
+
+	if (atomic_read(&task->task_state_active))
+		return;
+	/*
+	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
+	 * state list as well.  Running with SAM Task Attribute emulation
+	 * will always return head_of_queue == 0 here
+	 */
+	if (head_of_queue)
+		list_add(&task->t_state_list, (task_prev) ?
+				&task_prev->t_state_list :
+				&dev->state_task_list);
+	else
+		list_add_tail(&task->t_state_list, &dev->state_task_list);
+
+	atomic_set(&task->task_state_active, 1);
+
+	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+		task, dev);
+}
+
+static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
+{
+	struct se_device *dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		dev = task->se_dev;
+
+		if (atomic_read(&task->task_state_active))
+			continue;
+
+		spin_lock(&dev->execute_task_lock);
+		list_add_tail(&task->t_state_list, &dev->state_task_list);
+		atomic_set(&task->task_state_active, 1);
+
+		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+			CMD_TFO(task->task_se_cmd)->get_task_tag(
+			task->task_se_cmd), task, dev);
+
+		spin_unlock(&dev->execute_task_lock);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_task *task, *task_prev = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		if (atomic_read(&task->task_execute_queue))
+			continue;
+		/*
+		 * __transport_add_task_to_execute_queue() handles the
+		 * SAM Task Attribute emulation if enabled
+		 */
+		__transport_add_task_to_execute_queue(task, task_prev, dev);
+		atomic_set(&task->task_execute_queue, 1);
+		task_prev = task;
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+/*	transport_get_task_from_execute_queue():
+ *
+ *	Called with dev->execute_task_lock held.
+ */
+static struct se_task *
+transport_get_task_from_execute_queue(struct se_device *dev)
+{
+	struct se_task *task;
+
+	if (list_empty(&dev->execute_task_list))
+		return NULL;
+
+	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
+		break;
+
+	list_del(&task->t_execute_list);
+	atomic_dec(&dev->execute_tasks);
+
+	return task;
+}
+
+/*	transport_remove_task_from_execute_queue():
+ *
+ *
+ */
+static void transport_remove_task_from_execute_queue(
+	struct se_task *task,
+	struct se_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_del(&task->t_execute_list);
+	atomic_dec(&dev->execute_tasks);
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+{
+	switch (cmd->data_direction) {
+	case DMA_NONE:
+		return "NONE";
+	case DMA_FROM_DEVICE:
+		return "READ";
+	case DMA_TO_DEVICE:
+		return "WRITE";
+	case DMA_BIDIRECTIONAL:
+		return "BIDI";
+	default:
+		break;
+	}
+
+	return "UNKNOWN";
+}
+
+void transport_dump_dev_state(
+	struct se_device *dev,
+	char *b,
+	int *bl)
+{
+	*bl += sprintf(b + *bl, "Status: ");
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		*bl += sprintf(b + *bl, "ACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "DEACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		*bl += sprintf(b + *bl, "SHUTDOWN");
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "OFFLINE");
+		break;
+	default:
+		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
+		break;
+	}
+
+	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
+		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
+		dev->queue_depth);
+	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
+		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+	*bl += sprintf(b + *bl, "        ");
+}
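+
+/*
+ * Example of the output produced above for an idle, activated device
+ * with a queue depth of 32 and 512 byte sectors (values illustrative):
+ *
+ *	Status: ACTIVATED  Execute/Left/Max Queue Depth: 0/32/32  SectorSize: 512  MaxSectors: 1024
+ */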
+
+/*	transport_release_all_cmds():
+ *
+ *
+ */
+static void transport_release_all_cmds(struct se_device *dev)
+{
+	struct se_cmd *cmd = NULL;
+	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	int bug_out = 0, t_state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
+				qr_list) {
+
+		cmd = (struct se_cmd *)qr->cmd;
+		t_state = qr->state;
+		list_del(&qr->qr_list);
+		kfree(qr);
+		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+				flags);
+
+		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+			" t_state: %u directly\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+
+		transport_release_fe_cmd(cmd);
+		bug_out = 1;
+
+		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+#if 0
+	if (bug_out)
+		BUG();
+#endif
+}
+
+void transport_dump_vpd_proto_id(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
+
+	switch (vpd->protocol_identifier) {
+	case 0x00:
+		sprintf(buf+len, "Fibre Channel\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "Parallel SCSI\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SSA\n");
+		break;
+	case 0x30:
+		sprintf(buf+len, "IEEE 1394\n");
+		break;
+	case 0x40:
+		sprintf(buf+len, "SCSI Remote Direct Memory Access"
+				" Protocol\n");
+		break;
+	case 0x50:
+		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
+		break;
+	case 0x60:
+		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
+		break;
+	case 0x70:
+		sprintf(buf+len, "Automation/Drive Interface Transport"
+				" Protocol\n");
+		break;
+	case 0x80:
+		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n",
+				vpd->protocol_identifier);
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk(KERN_INFO "%s", buf);
+}
+
+void
+transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * Check if the Protocol Identifier Valid (PIV) bit is set.
+	 *
+	 * from spc3r23.pdf section 7.5.1
+	 */
+	if (page_83[1] & 0x80) {
+		vpd->protocol_identifier = (page_83[0] & 0xf0);
+		vpd->protocol_identifier_set = 1;
+		transport_dump_vpd_proto_id(vpd, NULL, 0);
+	}
+}
+EXPORT_SYMBOL(transport_set_vpd_proto_id);
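+
+/*
+ * Worked example for the decode above: a designator with
+ * page_83[0] = 0x61 and page_83[1] = 0x93 has the PIV bit (0x80) set,
+ * so protocol_identifier = 0x61 & 0xf0 = 0x60, which
+ * transport_dump_vpd_proto_id() reports as "SAS Serial SCSI Protocol".
+ */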
+
+int transport_dump_vpd_assoc(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0, len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Association: ");
+
+	switch (vpd->association) {
+	case 0x00:
+		sprintf(buf+len, "addressed logical unit\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "target port\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SCSI target device\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identification association.
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 297
+	 */
+	vpd->association = (page_83[1] & 0x30);
+	return transport_dump_vpd_assoc(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_assoc);
+
+int transport_dump_vpd_ident_type(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0, len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Type: ");
+
+	switch (vpd->device_identifier_type) {
+	case 0x00:
+		sprintf(buf+len, "Vendor specific\n");
+		break;
+	case 0x01:
+		sprintf(buf+len, "T10 Vendor ID based\n");
+		break;
+	case 0x02:
+		sprintf(buf+len, "EUI-64 based\n");
+		break;
+	case 0x03:
+		sprintf(buf+len, "NAA\n");
+		break;
+	case 0x04:
+		sprintf(buf+len, "Relative target port identifier\n");
+		break;
+	case 0x08:
+		sprintf(buf+len, "SCSI name string\n");
+		break;
+	default:
+		sprintf(buf+len, "Unsupported: 0x%02x\n",
+				vpd->device_identifier_type);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identifier type.
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 298
+	 */
+	vpd->device_identifier_type = (page_83[1] & 0x0f);
+	return transport_dump_vpd_ident_type(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident_type);
+
+int transport_dump_vpd_ident(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x02: /* ASCII */
+		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x03: /* UTF-8 */
+		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	default:
+		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
+			" 0x%02x", vpd->device_identifier_code_set);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int
+transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	static const char hex_str[] = "0123456789abcdef";
+	int j = 0, i = 4; /* offset to start of the identifier */
+
+	/*
+	 * The VPD Code Set (encoding)
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 296
+	 */
+	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		vpd->device_identifier[j++] =
+				hex_str[vpd->device_identifier_type];
+		while (i < (4 + page_83[3])) {
+			vpd->device_identifier[j++] =
+				hex_str[(page_83[i] & 0xf0) >> 4];
+			vpd->device_identifier[j++] =
+				hex_str[page_83[i] & 0x0f];
+			i++;
+		}
+		break;
+	case 0x02: /* ASCII */
+	case 0x03: /* UTF-8 */
+		while (i < (4 + page_83[3]))
+			vpd->device_identifier[j++] = page_83[i++];
+		break;
+	default:
+		break;
+	}
+
+	return transport_dump_vpd_ident(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident);
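+
+/*
+ * Worked example for the binary (0x01) code set above: with
+ * device_identifier_type = 0x3 (NAA), page_83[3] = 0x02 and identifier
+ * bytes page_83[4] = 0x60, page_83[5] = 0x01, the loop emits the ASCII
+ * hex string "36001" into vpd->device_identifier.
+ */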
+
+static void core_setup_task_attr_emulation(struct se_device *dev)
+{
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, disable the
+	 * SAM Task Attribute emulation.
+	 *
+	 * This is currently not available in upstream Linux/SCSI Target
+	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
+	 */
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
+		return;
+	}
+
+	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
+	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+		" device\n", TRANSPORT(dev)->name,
+		TRANSPORT(dev)->get_device_rev(dev));
+}
+
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+	struct t10_wwn *wwn = DEV_T10_WWN(dev);
+	int i, device_type;
+	/*
+	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+	 */
+	printk("  Vendor: ");
+	for (i = 0; i < 8; i++)
+		if (wwn->vendor[i] >= 0x20)
+			printk("%c", wwn->vendor[i]);
+		else
+			printk(" ");
+
+	printk("  Model: ");
+	for (i = 0; i < 16; i++)
+		if (wwn->model[i] >= 0x20)
+			printk("%c", wwn->model[i]);
+		else
+			printk(" ");
+
+	printk("  Revision: ");
+	for (i = 0; i < 4; i++)
+		if (wwn->revision[i] >= 0x20)
+			printk("%c", wwn->revision[i]);
+		else
+			printk(" ");
+
+	printk("\n");
+
+	device_type = TRANSPORT(dev)->get_device_type(dev);
+	printk("  Type:   %s ", scsi_device_type(device_type));
+	printk("                 ANSI SCSI revision: %02x\n",
+				TRANSPORT(dev)->get_device_rev(dev));
+}
+
+struct se_device *transport_add_device_to_core_hba(
+	struct se_hba *hba,
+	struct se_subsystem_api *transport,
+	struct se_subsystem_dev *se_dev,
+	u32 device_flags,
+	void *transport_dev,
+	struct se_dev_limits *dev_limits,
+	const char *inquiry_prod,
+	const char *inquiry_rev)
+{
+	int ret = 0, force_pt;
+	struct se_device  *dev;
+
+	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
+	if (!(dev)) {
+		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+		return NULL;
+	}
+	dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
+	if (!(dev->dev_queue_obj)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" dev->dev_queue_obj\n");
+		kfree(dev);
+		return NULL;
+	}
+	transport_init_queue_obj(dev->dev_queue_obj);
+
+	dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
+					GFP_KERNEL);
+	if (!(dev->dev_status_queue_obj)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" dev->dev_status_queue_obj\n");
+		kfree(dev->dev_queue_obj);
+		kfree(dev);
+		return NULL;
+	}
+	transport_init_queue_obj(dev->dev_status_queue_obj);
+
+	dev->dev_flags		= device_flags;
+	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
+	dev->dev_ptr		= (void *) transport_dev;
+	dev->se_hba		= hba;
+	dev->se_sub_dev		= se_dev;
+	dev->transport		= transport;
+	atomic_set(&dev->active_cmds, 0);
+	INIT_LIST_HEAD(&dev->dev_list);
+	INIT_LIST_HEAD(&dev->dev_sep_list);
+	INIT_LIST_HEAD(&dev->dev_tmr_list);
+	INIT_LIST_HEAD(&dev->execute_task_list);
+	INIT_LIST_HEAD(&dev->delayed_cmd_list);
+	INIT_LIST_HEAD(&dev->ordered_cmd_list);
+	INIT_LIST_HEAD(&dev->state_task_list);
+	spin_lock_init(&dev->execute_task_lock);
+	spin_lock_init(&dev->delayed_cmd_lock);
+	spin_lock_init(&dev->ordered_cmd_lock);
+	spin_lock_init(&dev->state_task_lock);
+	spin_lock_init(&dev->dev_alua_lock);
+	spin_lock_init(&dev->dev_reservation_lock);
+	spin_lock_init(&dev->dev_status_lock);
+	spin_lock_init(&dev->dev_status_thr_lock);
+	spin_lock_init(&dev->se_port_lock);
+	spin_lock_init(&dev->se_tmr_lock);
+
+	dev->queue_depth	= dev_limits->queue_depth;
+	atomic_set(&dev->depth_left, dev->queue_depth);
+	atomic_set(&dev->dev_ordered_id, 0);
+
+	se_dev_set_default_attribs(dev, dev_limits);
+
+	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+	dev->creation_time = get_jiffies_64();
+	spin_lock_init(&dev->stats_lock);
+
+	spin_lock(&hba->device_lock);
+	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
+	hba->dev_count++;
+	spin_unlock(&hba->device_lock);
+	/*
+	 * Setup the SAM Task Attribute emulation for struct se_device
+	 */
+	core_setup_task_attr_emulation(dev);
+	/*
+	 * Force PR and ALUA passthrough emulation with internal object use.
+	 */
+	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
+	/*
+	 * Setup the Reservations infrastructure for struct se_device
+	 */
+	core_setup_reservations(dev, force_pt);
+	/*
+	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
+	 */
+	if (core_setup_alua(dev, force_pt) < 0)
+		goto out;
+
+	/*
+	 * Startup the struct se_device processing thread
+	 */
+	dev->process_thread = kthread_run(transport_processing_thread, dev,
+					  "LIO_%s", TRANSPORT(dev)->name);
+	if (IS_ERR(dev->process_thread)) {
+		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+			TRANSPORT(dev)->name);
+		goto out;
+	}
+
+	/*
+	 * Preload the initial INQUIRY const values if we are doing
+	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+	 * passthrough because this is being provided by the backend LLD.
+	 * This is required so that transport_get_inquiry() copies these
+	 * originals once back into DEV_T10_WWN(dev) for the virtual device
+	 * setup.
+	 */
+	if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (!(inquiry_prod) || !(inquiry_rev)) {
+			printk(KERN_ERR "All non TCM/pSCSI plugins require"
+				" INQUIRY consts\n");
+			goto out;
+		}
+
+		strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
+		strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
+		strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+	}
+	scsi_dump_inquiry(dev);
+
+out:
+	if (!ret)
+		return dev;
+	kthread_stop(dev->process_thread);
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev->dev_status_queue_obj);
+	kfree(dev->dev_queue_obj);
+	kfree(dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(transport_add_device_to_core_hba);
+
+/*	transport_generic_prepare_cdb():
+ *
+ *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
+ *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
+ *	Because we are mapping iSCSI LUNs to SCSI Target IDs, a non-zero
+ *	LUN in the CDB would throw the devices and HBAs for a loop, so
+ *	clear it here.
+ */
+static inline void transport_generic_prepare_cdb(
+	unsigned char *cdb)
+{
+	switch (cdb[0]) {
+	case READ_10: /* SBC - RDProtect */
+	case READ_12: /* SBC - RDProtect */
+	case READ_16: /* SBC - RDProtect */
+	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+	case VERIFY: /* SBC - VRProtect */
+	case VERIFY_16: /* SBC - VRProtect */
+	case WRITE_VERIFY: /* SBC - VRProtect */
+	case WRITE_VERIFY_12: /* SBC - VRProtect */
+		break;
+	default:
+		cdb[1] &= 0x1f; /* clear logical unit number */
+		break;
+	}
+}
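+
+/*
+ * Worked example for the default case above: a READ_6 (0x08) CDB
+ * arriving with a legacy LUN value in byte 1, e.g. cdb[1] = 0xE5, is
+ * rewritten to cdb[1] = 0xE5 & 0x1f = 0x05 before CDB emulation runs.
+ */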
+
+static struct se_task *
+transport_generic_get_task(struct se_cmd *cmd,
+		enum dma_data_direction data_direction)
+{
+	struct se_task *task;
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned long flags;
+
+	task = dev->transport->alloc_task(cmd);
+	if (!task) {
+		printk(KERN_ERR "Unable to allocate struct se_task\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&task->t_list);
+	INIT_LIST_HEAD(&task->t_execute_list);
+	INIT_LIST_HEAD(&task->t_state_list);
+	init_completion(&task->task_stop_comp);
+	task->task_no = T_TASK(cmd)->t_tasks_no++;
+	task->task_se_cmd = cmd;
+	task->se_dev = dev;
+	task->task_data_direction = data_direction;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return task;
+}
+
+static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+
+void transport_device_setup_cmd(struct se_cmd *cmd)
+{
+	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
+}
+EXPORT_SYMBOL(transport_device_setup_cmd);
+
+/*
+ * Used by fabric modules containing a local struct se_cmd within their
+ * fabric dependent per I/O descriptor.
+ */
+void transport_init_se_cmd(
+	struct se_cmd *cmd,
+	struct target_core_fabric_ops *tfo,
+	struct se_session *se_sess,
+	u32 data_length,
+	int data_direction,
+	int task_attr,
+	unsigned char *sense_buffer)
+{
+	INIT_LIST_HEAD(&cmd->se_lun_list);
+	INIT_LIST_HEAD(&cmd->se_delayed_list);
+	INIT_LIST_HEAD(&cmd->se_ordered_list);
+	/*
+	 * Setup t_task pointer to t_task_backstore
+	 */
+	cmd->t_task = &cmd->t_task_backstore;
+
+	INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
+	init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+	init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+	init_completion(&T_TASK(cmd)->t_transport_stop_comp);
+	spin_lock_init(&T_TASK(cmd)->t_state_lock);
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+
+	cmd->se_tfo = tfo;
+	cmd->se_sess = se_sess;
+	cmd->data_length = data_length;
+	cmd->data_direction = data_direction;
+	cmd->sam_task_attr = task_attr;
+	cmd->sense_buffer = sense_buffer;
+}
+EXPORT_SYMBOL(transport_init_se_cmd);
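+
+/*
+ * Fabric-side usage sketch (illustrative only; my_cmd and my_sense are
+ * hypothetical fields of a fabric per-I/O descriptor):
+ *
+ *	transport_init_se_cmd(&my_cmd->se_cmd, tfo, se_sess, data_length,
+ *			DMA_FROM_DEVICE, TASK_ATTR_SIMPLE, my_sense);
+ *	[locate struct se_lun and set cmd->se_lun from the unpacked LUN]
+ *	if (transport_generic_allocate_tasks(&my_cmd->se_cmd, cdb) < 0)
+ *		[queue CHECK_CONDITION back to the initiator]
+ *	transport_generic_handle_cdb(&my_cmd->se_cmd);
+ */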
+
+static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+{
+	/*
+	 * Check if SAM Task Attribute emulation is enabled for this
+	 * struct se_device storage object
+	 */
+	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 0;
+
+	if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+		DEBUG_STA("SAM Task Attribute ACA"
+			" emulation is not supported\n");
+		return -1;
+	}
+	/*
+	 * Used to determine when ORDERED commands should go from
+	 * Dormant to Active status.
+	 */
+	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+	smp_mb__after_atomic_inc();
+	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+			cmd->se_ordered_id, cmd->sam_task_attr,
+			TRANSPORT(cmd->se_dev)->name);
+	return 0;
+}
+
+void transport_free_se_cmd(
+	struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_tmr_req)
+		core_tmr_release_req(se_cmd->se_tmr_req);
+	/*
+	 * Check and free any extended CDB buffer that was allocated
+	 */
+	if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
+		kfree(T_TASK(se_cmd)->t_task_cdb);
+}
+EXPORT_SYMBOL(transport_free_se_cmd);
+
+static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
+
+/*	transport_generic_allocate_tasks():
+ *
+ *	Called from fabric RX Thread.
+ */
+int transport_generic_allocate_tasks(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	int ret;
+
+	transport_generic_prepare_cdb(cdb);
+
+	/*
+	 * This is needed for early exceptions.
+	 */
+	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+
+	transport_device_setup_cmd(cmd);
+	/*
+	 * Ensure that the received CDB is less than the max (252 + 8) bytes
+	 * for VARIABLE_LENGTH_CMD
+	 */
+	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
+		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+		return -1;
+	}
+	/*
+	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
+	 * allocate the additional extended CDB buffer now.  Otherwise
+	 * setup the pointer from __t_task_cdb to t_task_cdb.
+	 */
+	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
+		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+						GFP_KERNEL);
+		if (!(T_TASK(cmd)->t_task_cdb)) {
+			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
+				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+				scsi_command_size(cdb),
+				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
+			return -1;
+		}
+	} else
+		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+	/*
+	 * Copy the original CDB into T_TASK(cmd).
+	 */
+	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+	/*
+	 * Setup the received CDB based on SCSI defined opcodes and
+	 * perform unit attention, persistent reservations and ALUA
+	 * checks for virtual device backends.  The T_TASK(cmd)->t_task_cdb
+	 * pointer is expected to be setup before we reach this point.
+	 */
+	ret = transport_generic_cmd_sequencer(cmd, cdb);
+	if (ret < 0)
+		return ret;
+	/*
+	 * Check for SAM Task Attribute Emulation
+	 */
+	if (transport_check_alloc_task_attr(cmd) < 0) {
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -2;
+	}
+	spin_lock(&cmd->se_lun->lun_sep_lock);
+	if (cmd->se_lun->lun_sep)
+		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
+	spin_unlock(&cmd->se_lun->lun_sep_lock);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_allocate_tasks);
+
+/*
+ * Used by fabric module frontends not defining a TFO->new_cmd_map()
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status.
+ */
+int transport_generic_handle_cdb(
+	struct se_cmd *cmd)
+{
+	if (!SE_LUN(cmd)) {
+		dump_stack();
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb);
+
+/*
+ * Used by fabric module frontends defining a TFO->new_cmd_map() caller
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
+ * complete setup in TCM process context w/ TFO->new_cmd_map().
+ */
+int transport_generic_handle_cdb_map(
+	struct se_cmd *cmd)
+{
+	if (!SE_LUN(cmd)) {
+		dump_stack();
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb_map);
+
+/*	transport_generic_handle_data():
+ *
+ *
+ */
+int transport_generic_handle_data(
+	struct se_cmd *cmd)
+{
+	/*
+	 * For the software fabric case, we assume the nexus is being
+	 * failed/shutdown when signals are pending from the kthread context
+	 * caller, so we return a failure.  For the HW target mode case running
+	 * in interrupt code, the signal_pending() check is skipped.
+	 */
+	if (!in_interrupt() && signal_pending(current))
+		return -1;
+	/*
+	 * If the received CDB has already been ABORTED by the generic
+	 * target engine, we now call transport_check_aborted_status()
+	 * to queue any delayed TASK_ABORTED status for the received CDB to the
+	 * fabric module, as we are expecting no further incoming DATA OUT
+	 * sequences at this point.
+	 */
+	if (transport_check_aborted_status(cmd, 1) != 0)
+		return 0;
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_data);
+
+/*	transport_generic_handle_tmr():
+ *
+ *
+ */
+int transport_generic_handle_tmr(
+	struct se_cmd *cmd)
+{
+	/*
+	 * This is needed for early exceptions.
+	 */
+	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+	transport_device_setup_cmd(cmd);
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int ret = 0;
+
+	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+		CMD_TFO(cmd)->get_task_tag(cmd));
+
+	/*
+	 * No tasks remain in the execution queue
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+		DEBUG_TS("task_no[%d] - Processing task %p\n",
+				task->task_no, task);
+		/*
+		 * If the struct se_task has not been sent and is not active,
+		 * remove the struct se_task from the execution queue.
+		 */
+		if (!atomic_read(&task->task_sent) &&
+		    !atomic_read(&task->task_active)) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			transport_remove_task_from_execute_queue(task,
+					task->se_dev);
+
+			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+				task->task_no);
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			continue;
+		}
+
+		/*
+		 * If the struct se_task is active, sleep until it is returned
+		 * from the plugin.
+		 */
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+
+			DEBUG_TS("task_no[%d] - Waiting to complete\n",
+				task->task_no);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_TS("task_no[%d] - Stopped successfully\n",
+				task->task_no);
+
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		} else {
+			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+			ret++;
+		}
+
+		__transport_stop_task_timer(task, &flags);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return ret;
+}
+
+static void transport_failure_reset_queue_depth(struct se_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+	atomic_inc(&dev->depth_left);
+	atomic_inc(&SE_HBA(dev)->left_queue_depth);
+	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+}
+
+/*
+ * Handle SAM-esque emulation for generic transport request failures.
+ */
+static void transport_generic_request_failure(
+	struct se_cmd *cmd,
+	struct se_device *dev,
+	int complete,
+	int sc)
+{
+	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+		" CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+		T_TASK(cmd)->t_task_cdb[0]);
+	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+		" %d/%d transport_error_status: %d\n",
+		CMD_TFO(cmd)->get_cmd_state(cmd),
+		cmd->t_state, cmd->deferred_t_state,
+		cmd->transport_error_status);
+	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
+		" t_transport_active: %d t_transport_stop: %d"
+		" t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
+		atomic_read(&T_TASK(cmd)->t_transport_active),
+		atomic_read(&T_TASK(cmd)->t_transport_stop),
+		atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+	transport_stop_all_task_timers(cmd);
+
+	if (dev)
+		transport_failure_reset_queue_depth(dev);
+	/*
+	 * For SAM Task Attribute emulation for failed struct se_cmd
+	 */
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+
+	if (complete) {
+		transport_direct_request_timeout(cmd);
+		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	switch (cmd->transport_error_status) {
+	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
+		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+		break;
+	case PYX_TRANSPORT_INVALID_CDB_FIELD:
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		break;
+	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		break;
+	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
+		if (!sc)
+			transport_new_cmd_failure(cmd);
+		/*
+		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
+		 * we force this session to fall back to session
+		 * recovery.
+		 */
+		CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
+		CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+
+		goto check_stop;
+	case PYX_TRANSPORT_LU_COMM_FAILURE:
+	case PYX_TRANSPORT_ILLEGAL_REQUEST:
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		break;
+	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
+		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+		break;
+	case PYX_TRANSPORT_WRITE_PROTECTED:
+		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+		break;
+	case PYX_TRANSPORT_RESERVATION_CONFLICT:
+		/*
+		 * No SENSE Data payload for this case, set SCSI Status
+		 * and queue the response to $FABRIC_MOD.
+		 *
+		 * Uses linux/include/scsi/scsi.h SAM status codes defs
+		 */
+		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+		/*
+		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+		 * CONFLICT STATUS.
+		 *
+		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+		 */
+		if (SE_SESS(cmd) &&
+		    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+			core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+				cmd->orig_fe_lun, 0x2C,
+				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+
+		CMD_TFO(cmd)->queue_status(cmd);
+		goto check_stop;
+	case PYX_TRANSPORT_USE_SENSE_REASON:
+		/*
+		 * struct se_cmd->scsi_sense_reason already set
+		 */
+		break;
+	default:
+		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			cmd->transport_error_status);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	}
+
+	if (!sc)
+		transport_new_cmd_failure(cmd);
+	else
+		transport_send_check_condition_and_sense(cmd,
+			cmd->scsi_sense_reason, 0);
+check_stop:
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_direct_request_timeout(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
+		   &T_TASK(cmd)->t_se_count);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_generic_request_timeout(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	/*
+	 * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
+	 * to allow last call to free memory resources.
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
+		int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+
+		atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_generic_remove(cmd, 0, 0);
+}
+
+static int
+transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
+{
+	unsigned char *buf;
+
+	buf = kzalloc(data_length, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for buffer\n");
+		return -1;
+	}
+
+	T_TASK(cmd)->t_tasks_se_num = 0;
+	T_TASK(cmd)->t_task_buf = buf;
+
+	return 0;
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
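+
+/*
+ * Worked example for the big-endian LBA decodes above: a READ_16 CDB
+ * with cdb[2..9] = 00 00 00 01 00 00 00 00 yields __v1 = 0x1 and
+ * __v2 = 0x0 in transport_lba_64(), i.e. LBA = 0x1ULL << 32 =
+ * 0x100000000.
+ */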
+
+static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+}
+
+/*
+ * Called from interrupt context.
+ */
+static void transport_task_timeout_handler(unsigned long data)
+{
+	struct se_task *task = (struct se_task *)data;
+	struct se_cmd *cmd = TASK_CMD(task);
+	unsigned long flags;
+
+	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (task->task_flags & TF_STOP) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	task->task_flags &= ~TF_RUNNING;
+
+	/*
+	 * Determine if transport_complete_task() has already been called.
+	 */
+	if (!(atomic_read(&task->task_active))) {
+		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+				" == 0\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	atomic_inc(&T_TASK(cmd)->t_se_count);
+	atomic_inc(&T_TASK(cmd)->t_transport_timeout);
+	T_TASK(cmd)->t_tasks_failed = 1;
+
+	atomic_set(&task->task_timeout, 1);
+	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
+	task->task_scsi_status = 1;
+
+	if (atomic_read(&task->task_stop)) {
+		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+				" == 1\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		complete(&task->task_stop_comp);
+		return;
+	}
+
+	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+				" t_task_cdbs_left\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+			task, cmd);
+
+	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
+}
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_start_task_timer(struct se_task *task)
+{
+	struct se_device *dev = task->se_dev;
+	int timeout;
+
+	if (task->task_flags & TF_RUNNING)
+		return;
+	/*
+	 * If the task_timeout is disabled, exit now.
+	 */
+	timeout = DEV_ATTRIB(dev)->task_timeout;
+	if (!(timeout))
+		return;
+
+	init_timer(&task->task_timer);
+	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
+	task->task_timer.data = (unsigned long) task;
+	task->task_timer.function = transport_task_timeout_handler;
+
+	task->task_flags |= TF_RUNNING;
+	add_timer(&task->task_timer);
+#if 0
+	printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+		" %d\n", task->task_se_cmd, task, timeout);
+#endif
+}
+
+/*
+ * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ */
+void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+
+	if (!(task->task_flags & TF_RUNNING))
+		return;
+
+	task->task_flags |= TF_STOP;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+
+	del_timer_sync(&task->task_timer);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+	task->task_flags &= ~TF_RUNNING;
+	task->task_flags &= ~TF_STOP;
+}
+
+static void transport_stop_all_task_timers(struct se_cmd *cmd)
+{
+	struct se_task *task = NULL, *task_tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list)
+		__transport_stop_task_timer(task, &flags);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline int transport_tcq_window_closed(struct se_device *dev)
+{
+	if (dev->dev_tcq_window_closed++ <
+			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
+		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
+	} else
+		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
+
+	wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+	return 0;
+}
+
+/*
+ * Called from Fabric Module context from transport_execute_tasks()
+ *
+ * The return of this function determines if the tasks from struct se_cmd
+ * get added to the execution queue in transport_execute_tasks(),
+ * or are added to the delayed or ordered lists here.
+ */
+static inline int transport_execute_task_attr(struct se_cmd *cmd)
+{
+	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 1;
+	/*
+	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
+	 * to allow the passed struct se_cmd's tasks to be added to the
+	 * front of the list.
+	 */
+	if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+		smp_mb__after_atomic_inc();
+		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+			" 0x%02x, se_ordered_id: %u\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			cmd->se_ordered_id);
+		return 1;
+	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+		spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
+		list_add_tail(&cmd->se_ordered_list,
+				&SE_DEV(cmd)->ordered_cmd_list);
+		spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+
+		atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+		smp_mb__after_atomic_inc();
+
+		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+				" list, se_ordered_id: %u\n",
+				T_TASK(cmd)->t_task_cdb[0],
+				cmd->se_ordered_id);
+		/*
+		 * Add ORDERED command to tail of execution queue if
+		 * no other older commands exist that need to be
+		 * completed first.
+		 */
+		if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+			return 1;
+	} else {
+		/*
+		 * For SIMPLE and UNTAGGED Task Attribute commands
+		 */
+		atomic_inc(&SE_DEV(cmd)->simple_cmds);
+		smp_mb__after_atomic_inc();
+	}
+	/*
+	 * If one or more outstanding ORDERED task attributes exist, the
+	 * dormant task(s) built for the passed struct se_cmd must not be
+	 * added to the execution queue for this struct se_device yet.
+	 */
+	if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+		/*
+		 * Add cmd w/ tasks to the delayed cmd queue that will be
+		 * drained upon completion of the HEAD_OF_QUEUE task.
+		 */
+		spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
+		list_add_tail(&cmd->se_delayed_list,
+				&SE_DEV(cmd)->delayed_cmd_list);
+		spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+
+		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+			" delayed CMD list, se_ordered_id: %u\n",
+			T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+			cmd->se_ordered_id);
+		/*
+		 * Return zero to let transport_execute_tasks() know
+		 * not to add the delayed tasks to the execution list.
+		 */
+		return 0;
+	}
+	/*
+	 * Otherwise, no ORDERED task attributes exist.
+	 */
+	return 1;
+}
+
+/*
+ * Called from fabric module context in transport_generic_new_cmd() and
+ * transport_generic_process_write()
+ */
+static int transport_execute_tasks(struct se_cmd *cmd)
+{
+	int add_tasks;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
+		if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+			cmd->transport_error_status =
+				PYX_TRANSPORT_LU_COMM_FAILURE;
+			transport_generic_request_failure(cmd, NULL, 0, 1);
+			return 0;
+		}
+	}
+	/*
+	 * Call transport_cmd_check_stop() to see if a fabric exception
+	 * has occurred that prevents execution.
+	 */
+	if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+		/*
+		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
+		 * attribute for the tasks of the received struct se_cmd CDB
+		 */
+		add_tasks = transport_execute_task_attr(cmd);
+		if (add_tasks == 0)
+			goto execute_tasks;
+		/*
+		 * This calls transport_add_tasks_from_cmd() to handle
+		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
+		 * (if enabled) in __transport_add_task_to_execute_queue() and
+		 * transport_add_task_check_sam_attr().
+		 */
+		transport_add_tasks_from_cmd(cmd);
+	}
+	/*
+	 * Kick the execution queue for the cmd associated struct se_device
+	 * storage object.
+	 */
+execute_tasks:
+	__transport_execute_tasks(SE_DEV(cmd));
+	return 0;
+}
+
+/*
+ * Called to check the struct se_device tcq depth window, and once open,
+ * pull struct se_task entries from struct se_device->execute_task_list
+ * and send them to the underlying transport.
+ *
+ * Called from transport_processing_thread()
+ */
+static int __transport_execute_tasks(struct se_device *dev)
+{
+	int error;
+	struct se_cmd *cmd = NULL;
+	struct se_task *task;
+	unsigned long flags;
+
+	/*
+	 * Check if there is enough room in the device and HBA queue to send
+	 * struct se_transport_task's to the selected transport.
+	 */
+check_depth:
+	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+	if (!(atomic_read(&dev->depth_left)) ||
+	    !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+		return transport_tcq_window_closed(dev);
+	}
+	dev->dev_tcq_window_closed = 0;
+
+	spin_lock(&dev->execute_task_lock);
+	task = transport_get_task_from_execute_queue(dev);
+	spin_unlock(&dev->execute_task_lock);
+
+	if (!task) {
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+		return 0;
+	}
+
+	atomic_dec(&dev->depth_left);
+	atomic_dec(&SE_HBA(dev)->left_queue_depth);
+	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+
+	cmd = TASK_CMD(task);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&task->task_active, 1);
+	atomic_set(&task->task_sent, 1);
+	atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+
+	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
+	    T_TASK(cmd)->t_task_cdbs)
+		atomic_set(&cmd->transport_sent, 1);
+
+	transport_start_task_timer(task);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
+	 * to grab REPORT_LUNS CDBs before they hit the
+	 * struct se_subsystem_api->do_task() caller below.
+	 */
+	if (cmd->transport_emulate_cdb) {
+		error = cmd->transport_emulate_cdb(cmd);
+		if (error != 0) {
+			cmd->transport_error_status = error;
+			atomic_set(&task->task_active, 0);
+			atomic_set(&cmd->transport_sent, 0);
+			transport_stop_tasks_for_cmd(cmd);
+			transport_generic_request_failure(cmd, dev, 0, 1);
+			goto check_depth;
+		}
+		/*
+		 * Handle the successful completion for transport_emulate_cdb()
+		 * for synchronous operation when SCF_EMULATE_CDB_ASYNC is not
+		 * set.  Otherwise the caller is expected to complete the task with
+		 * proper status.
+		 */
+		if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+			cmd->scsi_status = SAM_STAT_GOOD;
+			task->task_scsi_status = GOOD;
+			transport_complete_task(task, 1);
+		}
+	} else {
+		/*
+		 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
+		 * RAMDISK we use the internal transport_emulate_control_cdb() logic
+		 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
+		 * LUN emulation code.
+		 *
+		 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
+		 * call ->do_task() directly and let the underlying TCM subsystem plugin
+		 * code handle the CDB emulation.
+		 */
+		if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+		    (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+			error = transport_emulate_control_cdb(task);
+		else
+			error = TRANSPORT(dev)->do_task(task);
+
+		if (error != 0) {
+			cmd->transport_error_status = error;
+			atomic_set(&task->task_active, 0);
+			atomic_set(&cmd->transport_sent, 0);
+			transport_stop_tasks_for_cmd(cmd);
+			transport_generic_request_failure(cmd, dev, 0, 1);
+		}
+	}
+
+	goto check_depth;
+}
+
+void transport_new_cmd_failure(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+	/*
+	 * Any unsolicited data will get dumped for failed command inside of
+	 * the fabric plugin
+	 */
+	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
+	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+
+	CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+}
+
+static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
+
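+/*
+ * The transport_get_sectors_*() helpers below extract the transfer length
+ * from the fixed CDB offsets defined in SBC/SSC.  For example, a READ_10
+ * CDB carries a 16-bit TRANSFER LENGTH in bytes 7-8, so cdb[7] = 0x00 and
+ * cdb[8] = 0x08 yields 8 logical blocks.
+ */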
+static inline u32 transport_get_sectors_6(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 8-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 8-bit sector value.
+	 */
+type_disk:
+	return (u32)cdb[4];
+}
+
+static inline u32 transport_get_sectors_10(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 16-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_10 is not defined in SSC, throw an exception
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -1;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 16-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_12 is not defined in SSC, throw an exception
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -1;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 32-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
+
+type_disk:
+	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+		    (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+		    (cdb[30] << 8) + cdb[31];
+}
+
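+/*
+ * Convert a CDB transfer length into a byte count.  For TYPE_TAPE the
+ * FIXED bit (cdb[1] bit 0) selects whether the length is a block count
+ * or a byte count; all other device types use block-sized sectors.
+ */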
+static inline u32 transport_get_size(
+	u32 sectors,
+	unsigned char *cdb,
+	struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		if (cdb[1] & 1) { /* sectors */
+			return DEV_ATTRIB(dev)->block_size * sectors;
+		} else /* bytes */
+			return sectors;
+	}
+#if 0
+	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+			" %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
+			DEV_ATTRIB(dev)->block_size * sectors,
+			TRANSPORT(dev)->name);
+#endif
+	return DEV_ATTRIB(dev)->block_size * sectors;
+}
+
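+/*
+ * Convert a two-character ASCII hex string into its byte value, e.g.
+ * transport_asciihex_to_binaryhex("3f") returns 0x3f.  Input is assumed
+ * to contain valid hex digits; no validation is performed here.
+ */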
+unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
+{
+	unsigned char result = 0;
+	/*
+	 * MSB
+	 */
+	if ((val[0] >= 'a') && (val[0] <= 'f'))
+		result = ((val[0] - 'a' + 10) & 0xf) << 4;
+	else
+		if ((val[0] >= 'A') && (val[0] <= 'F'))
+			result = ((val[0] - 'A' + 10) & 0xf) << 4;
+		else /* digit */
+			result = ((val[0] - '0') & 0xf) << 4;
+	/*
+	 * LSB
+	 */
+	if ((val[1] >= 'a') && (val[1] <= 'f'))
+		result |= ((val[1] - 'a' + 10) & 0xf);
+	else
+		if ((val[1] >= 'A') && (val[1] <= 'F'))
+			result |= ((val[1] - 'A' + 10) & 0xf);
+		else /* digit */
+			result |= ((val[1] - '0') & 0xf);
+
+	return result;
+}
+EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
+
+static void transport_xor_callback(struct se_cmd *cmd)
+{
+	unsigned char *buf, *addr;
+	struct se_mem *se_mem;
+	unsigned int offset;
+	int i;
+	/*
+	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+	 *
+	 * 1) read the specified logical block(s);
+	 * 2) transfer logical blocks from the data-out buffer;
+	 * 3) XOR the logical blocks transferred from the data-out buffer with
+	 *    the logical blocks read, storing the resulting XOR data in a buffer;
+	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+	 *    blocks transferred from the data-out buffer; and
+	 * 5) transfer the resulting XOR data to the data-in buffer.
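+	 *
+	 * Steps 3) and 5) are what this callback implements; e.g. a read
+	 * byte of 0xAA XORed with a data-out byte of 0x0F produces 0xA5
+	 * in the data-in buffer.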
+	 */
+	buf = kmalloc(cmd->data_length, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+		return;
+	}
+	/*
+	 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+	 * into the locally allocated *buf
+	 */
+	transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+	/*
+	 * Now perform the XOR against the BIDI read memory located at
+	 * T_TASK(cmd)->t_mem_bidi_list
+	 */
+
+	offset = 0;
+	list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
+		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
+		if (!(addr))
+			goto out;
+
+		for (i = 0; i < se_mem->se_len; i++)
+			*(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+
+		offset += se_mem->se_len;
+		kunmap_atomic(addr, KM_USER0);
+	}
+out:
+	kfree(buf);
+}
+
+/*
+ * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
+ */
+static int transport_get_sense_data(struct se_cmd *cmd)
+{
+	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
+	struct se_device *dev;
+	struct se_task *task = NULL, *task_tmp;
+	unsigned long flags;
+	u32 offset = 0;
+
+	if (!SE_LUN(cmd)) {
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return 0;
+	}
+
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+
+		if (!task->task_sense)
+			continue;
+
+		dev = task->se_dev;
+		if (!(dev))
+			continue;
+
+		if (!TRANSPORT(dev)->get_sense_buffer) {
+			printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+					" is NULL\n");
+			continue;
+		}
+
+		sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
+		if (!(sense_buffer)) {
+			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+				" sense buffer for task with sense\n",
+				CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+			continue;
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+
+		memcpy((void *)&buffer[offset], (void *)sense_buffer,
+				TRANSPORT_SENSE_BUFFER);
+		cmd->scsi_status = task->task_scsi_status;
+		/* Automatically padded */
+		cmd->scsi_sense_length =
+				(TRANSPORT_SENSE_BUFFER + offset);
+
+		printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+				" and sense\n",
+			dev->se_hba->hba_id, TRANSPORT(dev)->name,
+				cmd->scsi_status);
+		return 0;
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return -1;
+}
+
+static int transport_allocate_resources(struct se_cmd *cmd)
+{
+	u32 length = cmd->data_length;
+
+	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
+		return transport_generic_get_mem(cmd, length, PAGE_SIZE);
+	else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
+		return transport_generic_allocate_buf(cmd, length);
+	else
+		return 0;
+}
+
+static int
+transport_handle_reservation_conflict(struct se_cmd *cmd)
+{
+	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+	/*
+	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+	 * CONFLICT STATUS.
+	 *
+	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+	 */
+	if (SE_SESS(cmd) &&
+	    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+		core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+			cmd->orig_fe_lun, 0x2C,
+			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+	return -2;
+}
+
+/*	transport_generic_cmd_sequencer():
+ *
+ *	Generic Command Sequencer that should work for most DAS transport
+ *	drivers.
+ *
+ *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ *	RX Thread.
+ *
+ *	FIXME: Need to support other SCSI OPCODES here as well.
+ */
+static int transport_generic_cmd_sequencer(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	int ret = 0, sector_ret = 0, passthrough;
+	u32 sectors = 0, size = 0, pr_reg_type = 0;
+	u16 service_action;
+	u8 alua_ascq = 0;
+	/*
+	 * Check for an existing UNIT ATTENTION condition
+	 */
+	if (core_scsi3_ua_check(cmd, cdb) < 0) {
+		cmd->transport_wait_for_tasks =
+				&transport_nop_wait_for_tasks;
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+		return -2;
+	}
+	/*
+	 * Check status of Asymmetric Logical Unit Assignment port
+	 */
+	ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+	if (ret != 0) {
+		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+		/*
+		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+		 * the ALUA additional sense code qualifier (ASCQ) is determined
+		 * by the ALUA primary or secondary access state.
+		 */
+		if (ret > 0) {
+#if 0
+			printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
+				CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+#endif
+			transport_set_sense_codes(cmd, 0x04, alua_ascq);
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+			return -2;
+		}
+		goto out_invalid_cdb_field;
+	}
+	/*
+	 * Check status for SPC-3 Persistent Reservations
+	 */
+	if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
+		if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+					cmd, cdb, pr_reg_type) != 0)
+			return transport_handle_reservation_conflict(cmd);
+		/*
+		 * This means the CDB is allowed for the SCSI Initiator port
+		 * when said port is *NOT* holding the legacy SPC-2 or
+		 * SPC-3 Persistent Reservation.
+		 */
+	}
+
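+	/*
+	 * Decode the CDB opcode: each case below determines the expected
+	 * transfer size, the starting LBA for data I/O opcodes, and the
+	 * SCF_SCSI_*_CDB flag used later for resource allocation.
+	 */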
+	switch (cdb[0]) {
+	case READ_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_6;
+		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_12;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_16;
+		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_6;
+		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_12;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_16;
+		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case XDWRITEREAD_10:
+		if ((cmd->data_direction != DMA_TO_DEVICE) ||
+		    !(T_TASK(cmd)->t_tasks_bidi))
+			goto out_invalid_cdb_field;
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Skip the remaining assignments for TCM/PSCSI passthrough
+		 */
+		if (passthrough)
+			break;
+		/*
+		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+		 */
+		cmd->transport_complete_callback = &transport_xor_callback;
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		break;
+	case VARIABLE_LENGTH_CMD:
+		service_action = get_unaligned_be16(&cdb[8]);
+		/*
+		 * Determine if this is a TCM/PSCSI device and we should disable
+		 * internal emulation for this CDB.
+		 */
+		passthrough = (TRANSPORT(dev)->transport_type ==
+					TRANSPORT_PLUGIN_PHBA_PDEV);
+
+		switch (service_action) {
+		case XDWRITEREAD_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+			size = transport_get_size(sectors, cdb, cmd);
+			/*
+			 * Use WRITE_32 and READ_32 opcodes for the emulated
+			 * XDWRITE_READ_32 logic.
+			 */
+			cmd->transport_split_cdb = &split_cdb_XX_32;
+			T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+
+			/*
+			 * Skip the remaining assignments for TCM/PSCSI passthrough
+			 */
+			if (passthrough)
+				break;
+
+			/*
+			 * Setup BIDI XOR callback to be run during
+			 * transport_generic_complete_ok()
+			 */
+			cmd->transport_complete_callback = &transport_xor_callback;
+			T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+			break;
+		case WRITE_SAME_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+			size = transport_get_size(sectors, cdb, cmd);
+			T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+			/*
+			 * Skip the remaining assignments for TCM/PSCSI passthrough
+			 */
+			if (passthrough)
+				break;
+
+			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
+				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+					" bits not supported for Block Discard"
+					" Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			/*
+			 * Currently for the emulated case we only accept thin
+			 * provisioning WRITE_SAME (tpws) with the UNMAP=1 bit set.
+			 */
+			if (!(cdb[10] & 0x08)) {
+				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+					" supported for Block Discard Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			break;
+		default:
+			printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+				" 0x%04x not supported\n", service_action);
+			goto out_unsupported_cdb;
+		}
+		break;
+	case 0xa3:
+		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_IN from SCC-2 */
+			/*
+			 * Check for emulated MI_REPORT_TARGET_PGS.
+			 */
+			if (cdb[1] == MI_REPORT_TARGET_PGS) {
+				cmd->transport_emulate_cdb =
+				(T10_ALUA(su_dev)->alua_type ==
+				 SPC3_ALUA_EMULATED) ?
+				&core_emulate_report_target_port_groups :
+				NULL;
+			}
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else {
+			/* GPCMD_SEND_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case MODE_SELECT:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SELECT_10:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case MODE_SENSE_10:
+	case GPCMD_READ_BUFFER_CAPACITY:
+	case GPCMD_SEND_OPC:
+	case LOG_SELECT:
+	case LOG_SENSE:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_BLOCK_LIMITS:
+		size = READ_BLOCK_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case GPCMD_GET_CONFIGURATION:
+	case GPCMD_READ_FORMAT_CAPACITIES:
+	case GPCMD_READ_DISC_INFO:
+	case GPCMD_READ_TRACK_RZONE_INFO:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+		cmd->transport_emulate_cdb =
+			(T10_RES(su_dev)->res_type ==
+			 SPC3_PERSISTENT_RESERVATIONS) ?
+			&core_scsi3_emulate_pr : NULL;
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case GPCMD_MECHANISM_STATUS:
+	case GPCMD_READ_DVD_STRUCTURE:
+		size = (cdb[8] << 8) + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case READ_POSITION:
+		size = READ_POSITION_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case 0xa4:
+		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_OUT from SCC-2
+			 *
+			 * Check for emulated MO_SET_TARGET_PGS.
+			 */
+			if (cdb[1] == MO_SET_TARGET_PGS) {
+				cmd->transport_emulate_cdb =
+				(T10_ALUA(su_dev)->alua_type ==
+					SPC3_ALUA_EMULATED) ?
+				&core_emulate_set_target_port_groups :
+				NULL;
+			}
+
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else  {
+			/* GPCMD_REPORT_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case INQUIRY:
+		size = (cdb[3] << 8) + cdb[4];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+		 * See spc4r17 section 5.3
+		 */
+		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = TASK_ATTR_HOQ;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_CAPACITY:
+		size = READ_CAP_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_MEDIA_SERIAL_NUMBER:
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case SERVICE_ACTION_IN:
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case EXTENDED_COPY:
+	case READ_ATTRIBUTE:
+	case RECEIVE_COPY_RESULTS:
+	case WRITE_ATTRIBUTE:
+		size = (cdb[10] << 24) | (cdb[11] << 16) |
+		       (cdb[12] << 8) | cdb[13];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+		size = (cdb[3] << 8) | cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
+#if 0
+	case GPCMD_READ_CD:
+		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		size = (2336 * sectors);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+#endif
+	case READ_TOC:
+		size = cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case REQUEST_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_ELEMENT_STATUS:
+		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case WRITE_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/*
+		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RESERVE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		/*
+		 * Setup the legacy emulated handler for SPC-2 and
+		 * >= SPC-3 compatible reservation handling (CRH=1)
+		 * Otherwise, we assume the underlying SCSI logic is
+		 * running in SPC_PASSTHROUGH, and wants reservation
+		 * emulation disabled.
+		 */
+		cmd->transport_emulate_cdb =
+				(T10_RES(su_dev)->res_type !=
+				 SPC_PASSTHROUGH) ?
+				&core_scsi2_emulate_crh : NULL;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/*
+		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RELEASE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		cmd->transport_emulate_cdb =
+				(T10_RES(su_dev)->res_type !=
+				 SPC_PASSTHROUGH) ?
+				&core_scsi2_emulate_crh : NULL;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case SYNCHRONIZE_CACHE:
+	case 0x91: /* SYNCHRONIZE_CACHE_16: */
+		/*
+		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+		 */
+		if (cdb[0] == SYNCHRONIZE_CACHE) {
+			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+			T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+			T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		}
+		if (sector_ret)
+			goto out_unsupported_cdb;
+
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+
+		/*
+		 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
+		 */
+		if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+			break;
+		/*
+		 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
+		 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
+		 */
+		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
+		/*
+		 * Check to ensure that LBA + Range does not extend past the
+		 * end of the device.
+		 */
+		if (transport_get_sectors(cmd) < 0)
+			goto out_invalid_cdb_field;
+		break;
+	case UNMAP:
+		size = get_unaligned_be16(&cdb[7]);
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Determine if the received UNMAP is used for direct passthrough
+		 * into Linux/SCSI with struct request via TCM/pSCSI, or if we are
+		 * signaling the use of internal transport_generic_unmap() emulation
+		 * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
+		 * subsystem plugin backstores.
+		 */
+		if (!(passthrough))
+			cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
+
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case WRITE_SAME_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Determine if the received WRITE_SAME_16 is used for direct
+		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
+		 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
+		 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
+		 * TCM/FILEIO subsystem plugin backstores.
+		 */
+		if (!(passthrough)) {
+			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
+				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+					" bits not supported for Block Discard"
+					" Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			/*
+			 * Currently for the emulated case we only accept thin
+			 * provisioning WRITE_SAME (tpws) with the UNMAP=1 bit set.
+			 */
+			if (!(cdb[1] & 0x08)) {
+				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+					" supported for Block Discard Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case GPCMD_CLOSE_TRACK:
+	case ERASE:
+	case INITIALIZE_ELEMENT_STATUS:
+	case GPCMD_LOAD_UNLOAD:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case GPCMD_SET_SPEED:
+	case SPACE:
+	case START_STOP:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+	case MOVE_MEDIUM:
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case REPORT_LUNS:
+		cmd->transport_emulate_cdb =
+				&transport_core_report_lun_response;
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+		 * See spc4r17 section 5.3
+		 */
+		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = TASK_ATTR_HOQ;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	default:
+		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+			" 0x%02x, sending CHECK_CONDITION.\n",
+			CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+		goto out_unsupported_cdb;
+	}
+
+	if (size != cmd->data_length) {
+		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
+			" 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+				cmd->data_length, size, cdb[0]);
+
+		cmd->cmd_spdtl = size;
+
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			printk(KERN_ERR "Rejecting underflow/overflow"
+					" WRITE data\n");
+			goto out_invalid_cdb_field;
+		}
+		/*
+		 * Reject READ_* or WRITE_* with overflow/underflow for
+		 * type SCF_SCSI_DATA_SG_IO_CDB.
+		 */
+		if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512))  {
+			printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+				" CDB on non 512-byte sector setup subsystem"
+				" plugin: %s\n", TRANSPORT(dev)->name);
+			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+			goto out_invalid_cdb_field;
+		}
+
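+		/*
+		 * Record the residual count: e.g. a CDB-derived size of 4096
+		 * bytes against a fabric-provided data_length of 2048 sets
+		 * SCF_OVERFLOW_BIT with residual_count = 2048 before
+		 * data_length is updated to the CDB-derived size.
+		 */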
+		if (size > cmd->data_length) {
+			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+			cmd->residual_count = (size - cmd->data_length);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - size);
+		}
+		cmd->data_length = size;
+	}
+
+	transport_set_supported_SAM_opcode(cmd);
+	return ret;
+
+out_unsupported_cdb:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+	return -2;
+out_invalid_cdb_field:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+	return -2;
+}
+
+static inline void transport_release_tasks(struct se_cmd *);
+
+/*
+ * This function will copy a contiguous *src buffer into a destination
+ * struct scatterlist array.
+ */
+static void transport_memcpy_write_contig(
+	struct se_cmd *cmd,
+	struct scatterlist *sg_d,
+	unsigned char *src)
+{
+	u32 i = 0, length = 0, total_length = cmd->data_length;
+	void *dst;
+
+	while (total_length) {
+		length = sg_d[i].length;
+
+		if (length > total_length)
+			length = total_length;
+
+		dst = sg_virt(&sg_d[i]);
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		src += length;
+		i++;
+	}
+}
+
+/*
+ * This function will copy a struct scatterlist array *sg_s into a destination
+ * contiguous *dst buffer.
+ */
+static void transport_memcpy_read_contig(
+	struct se_cmd *cmd,
+	unsigned char *dst,
+	struct scatterlist *sg_s)
+{
+	u32 i = 0, length = 0, total_length = cmd->data_length;
+	void *src;
+
+	while (total_length) {
+		length = sg_s[i].length;
+
+		if (length > total_length)
+			length = total_length;
+
+		src = sg_virt(&sg_s[i]);
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		dst += length;
+		i++;
+	}
+}
+
+static void transport_memcpy_se_mem_read_contig(
+	struct se_cmd *cmd,
+	unsigned char *dst,
+	struct list_head *se_mem_list)
+{
+	struct se_mem *se_mem;
+	void *src;
+	u32 length = 0, total_length = cmd->data_length;
+
+	list_for_each_entry(se_mem, se_mem_list, se_list) {
+		length = se_mem->se_len;
+
+		if (length > total_length)
+			length = total_length;
+
+		src = page_address(se_mem->se_page) + se_mem->se_off;
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		dst += length;
+	}
+}
+
+/*
+ * Called from transport_generic_complete_ok() and
+ * transport_generic_request_failure() to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue.
+ */
+static void transport_complete_task_attr(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_cmd *cmd_p, *cmd_tmp;
+	int new_active_tasks = 0;
+
+	if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+		atomic_dec(&dev->simple_cmds);
+		smp_mb__after_atomic_dec();
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		atomic_dec(&dev->dev_hoq_count);
+		smp_mb__after_atomic_dec();
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+		spin_lock(&dev->ordered_cmd_lock);
+		list_del(&cmd->se_ordered_list);
+		atomic_dec(&dev->dev_ordered_sync);
+		smp_mb__after_atomic_dec();
+		spin_unlock(&dev->ordered_cmd_lock);
+
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+	}
+	/*
+	 * Process all commands up to the last received
+	 * ORDERED task attribute which requires another blocking
+	 * boundary
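+	 * (e.g. given a delayed list of [SIMPLE, SIMPLE, ORDERED, SIMPLE],
+	 * the loop below re-activates the first two SIMPLE cmds and then
+	 * the ORDERED cmd itself, leaving the final SIMPLE cmd behind the
+	 * new boundary).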
+	 */
+	spin_lock(&dev->delayed_cmd_lock);
+	list_for_each_entry_safe(cmd_p, cmd_tmp,
+			&dev->delayed_cmd_list, se_delayed_list) {
+
+		list_del(&cmd_p->se_delayed_list);
+		spin_unlock(&dev->delayed_cmd_lock);
+
+		DEBUG_STA("Calling add_tasks() for"
+			" cmd_p: 0x%02x Task Attr: 0x%02x"
+			" Dormant -> Active, se_ordered_id: %u\n",
+			T_TASK(cmd_p)->t_task_cdb[0],
+			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
+
+		transport_add_tasks_from_cmd(cmd_p);
+		new_active_tasks++;
+
+		spin_lock(&dev->delayed_cmd_lock);
+		if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+			break;
+	}
+	spin_unlock(&dev->delayed_cmd_lock);
+	/*
+	 * If new tasks have become active, wake up the transport thread
+	 * to do the processing of the Active tasks.
+	 */
+	if (new_active_tasks != 0)
+		wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+}
+
+static void transport_generic_complete_ok(struct se_cmd *cmd)
+{
+	int reason = 0;
+	/*
+	 * Check if we need to move delayed/dormant tasks from cmds on the
+	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
+	 * Attribute.
+	 */
+	if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+	/*
+	 * Check if we need to retrieve a sense buffer from
+	 * the struct se_cmd in question.
+	 */
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		if (transport_get_sense_data(cmd) < 0)
+			reason = TCM_NON_EXISTENT_LUN;
+
+		/*
+		 * Only set when a struct se_task->task_scsi_status returned
+		 * a non-GOOD status.
+		 */
+		if (cmd->scsi_status) {
+			transport_send_check_condition_and_sense(
+					cmd, reason, 1);
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
+	}
+	/*
+	 * Check for a callback, used by, amongst other things,
+	 * XDWRITE_READ_10 emulation.
+	 */
+	if (cmd->transport_complete_callback)
+		cmd->transport_complete_callback(cmd);
+
+	switch (cmd->data_direction) {
+	case DMA_FROM_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (SE_LUN(cmd)->lun_sep) {
+			SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * If enabled by TCM fabric module pre-registered SGL
+		 * memory, perform the memcpy() from the TCM internal
+		 * contiguous buffer back to the original SGL.
+		 */
+		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+			transport_memcpy_write_contig(cmd,
+				 T_TASK(cmd)->t_task_pt_sgl,
+				 T_TASK(cmd)->t_task_buf);
+
+		CMD_TFO(cmd)->queue_data_in(cmd);
+		break;
+	case DMA_TO_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (SE_LUN(cmd)->lun_sep) {
+			SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+				cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Check if we need to send READ payload for BIDI-COMMAND
+		 */
+		if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+			spin_lock(&cmd->se_lun->lun_sep_lock);
+			if (SE_LUN(cmd)->lun_sep) {
+				SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+			}
+			spin_unlock(&cmd->se_lun->lun_sep_lock);
+			CMD_TFO(cmd)->queue_data_in(cmd);
+			break;
+		}
+		/* Fall through for DMA_TO_DEVICE */
+	case DMA_NONE:
+		CMD_TFO(cmd)->queue_status(cmd);
+		break;
+	default:
+		break;
+	}
+
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_free_dev_tasks(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+		if (atomic_read(&task->task_active))
+			continue;
+
+		kfree(task->task_sg_bidi);
+		kfree(task->task_sg);
+
+		list_del(&task->t_list);
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		if (task->se_dev)
+			TRANSPORT(task->se_dev)->free_task(task);
+		else
+			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+				task->task_no);
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+	struct se_mem *se_mem, *se_mem_tmp;
+	int free_page = 1;
+
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+		free_page = 0;
+	if (cmd->se_dev->transport->do_se_mem_map)
+		free_page = 0;
+
+	if (T_TASK(cmd)->t_task_buf) {
+		kfree(T_TASK(cmd)->t_task_buf);
+		T_TASK(cmd)->t_task_buf = NULL;
+		return;
+	}
+
+	/*
+	 * Caller will handle releasing of struct se_mem.
+	 */
+	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
+		return;
+
+	if (!(T_TASK(cmd)->t_tasks_se_num))
+		return;
+
+	list_for_each_entry_safe(se_mem, se_mem_tmp,
+			T_TASK(cmd)->t_mem_list, se_list) {
+		/*
+		 * Only call __free_page(struct se_mem->se_page) when
+		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+		 */
+		if (free_page)
+			__free_page(se_mem->se_page);
+
+		list_del(&se_mem->se_list);
+		kmem_cache_free(se_mem_cache, se_mem);
+	}
+
+	if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
+		list_for_each_entry_safe(se_mem, se_mem_tmp,
+				T_TASK(cmd)->t_mem_bidi_list, se_list) {
+			/*
+			 * Only call __free_page(struct se_mem->se_page) when
+			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+			 */
+			if (free_page)
+				__free_page(se_mem->se_page);
+
+			list_del(&se_mem->se_list);
+			kmem_cache_free(se_mem_cache, se_mem);
+		}
+	}
+
+	kfree(T_TASK(cmd)->t_mem_bidi_list);
+	T_TASK(cmd)->t_mem_bidi_list = NULL;
+	kfree(T_TASK(cmd)->t_mem_list);
+	T_TASK(cmd)->t_mem_list = NULL;
+	T_TASK(cmd)->t_tasks_se_num = 0;
+}
+
+static inline void transport_release_tasks(struct se_cmd *cmd)
+{
+	transport_free_dev_tasks(cmd);
+}
+
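+/*
+ * Drop a reference against the frontend (t_fe_count) and then the
+ * backend (t_se_count) count of the cmd; returns 1 while either count
+ * remains non-zero, meaning the cmd cannot be released yet.
+ */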
+static inline int transport_dec_and_check(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			return 1;
+		}
+	}
+
+	if (atomic_read(&T_TASK(cmd)->t_se_count)) {
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			return 1;
+		}
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return 0;
+}
+
+static void transport_release_fe_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	if (transport_dec_and_check(cmd))
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto free_pages;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_release_tasks(cmd);
+free_pages:
+	transport_free_pages(cmd);
+	transport_free_se_cmd(cmd);
+	CMD_TFO(cmd)->release_cmd_direct(cmd);
+}
+
+static int transport_generic_remove(
+	struct se_cmd *cmd,
+	int release_to_pool,
+	int session_reinstatement)
+{
+	unsigned long flags;
+
+	if (!(T_TASK(cmd)))
+		goto release_cmd;
+
+	if (transport_dec_and_check(cmd)) {
+		if (session_reinstatement) {
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			transport_all_task_dev_remove_state(cmd);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+		}
+		return 1;
+	}
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto free_pages;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_release_tasks(cmd);
+free_pages:
+	transport_free_pages(cmd);
+
+release_cmd:
+	if (release_to_pool) {
+		transport_release_cmd_to_pool(cmd);
+	} else {
+		transport_free_se_cmd(cmd);
+		CMD_TFO(cmd)->release_cmd_direct(cmd);
+	}
+
+	return 0;
+}
+
+/*
+ * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * @cmd:  Associated se_cmd descriptor
+ * @mem:  SGL style memory for TCM WRITE / READ
+ * @sg_mem_num: Number of SGL elements
+ * @mem_bidi_in: SGL style memory for TCM BIDI READ
+ * @sg_mem_bidi_num: Number of BIDI READ SGL elements
+ *
+ * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
+ * of parameters.
+ */
+int transport_generic_map_mem_to_cmd(
+	struct se_cmd *cmd,
+	struct scatterlist *mem,
+	u32 sg_mem_num,
+	struct scatterlist *mem_bidi_in,
+	u32 sg_mem_bidi_num)
+{
+	u32 se_mem_cnt_out = 0;
+	int ret;
+
+	if (!(mem) || !(sg_mem_num))
+		return 0;
+	/*
+	 * Passed *mem will contain a list_head containing preformatted
+	 * struct se_mem elements...
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
+		if ((mem_bidi_in) || (sg_mem_bidi_num)) {
+			printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
+				" with BIDI-COMMAND\n");
+			return -ENOSYS;
+		}
+
+		T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
+		T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
+		cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
+		return 0;
+	}
+	/*
+	 * Otherwise, assume the caller is passing a struct scatterlist
+	 * array from include/linux/scatterlist.h
+	 */
+	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+		/*
+		 * For CDBs using TCM struct se_mem linked-list scatterlist memory
+		 * processed into a TCM struct se_subsystem_dev, we do the mapping
+		 * from the passed physical memory to struct se_mem->se_page here.
+		 */
+		T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+		if (!(T_TASK(cmd)->t_mem_list))
+			return -ENOMEM;
+
+		ret = transport_map_sg_to_mem(cmd,
+			T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
+		if (ret < 0)
+			return -ENOMEM;
+
+		T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
+		/*
+		 * Setup BIDI READ list of struct se_mem elements
+		 */
+		if ((mem_bidi_in) && (sg_mem_bidi_num)) {
+			T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+			if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+				kfree(T_TASK(cmd)->t_mem_list);
+				return -ENOMEM;
+			}
+			se_mem_cnt_out = 0;
+
+			ret = transport_map_sg_to_mem(cmd,
+				T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
+				&se_mem_cnt_out);
+			if (ret < 0) {
+				kfree(T_TASK(cmd)->t_mem_list);
+				return -ENOMEM;
+			}
+
+			T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+		}
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+		if (mem_bidi_in || sg_mem_bidi_num) {
+			printk(KERN_ERR "BIDI-Commands not supported using "
+				"SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
+			return -ENOSYS;
+		}
+		/*
+		 * For incoming CDBs using a contiguous buffer internally with TCM,
+		 * save the passed struct scatterlist memory.  After TCM storage object
+		 * processing has completed for this struct se_cmd, TCM core will call
+		 * transport_memcpy_[write,read]_contig() as necessary from
+		 * transport_generic_complete_ok() and transport_write_pending() in order
+		 * to copy the TCM buffer to/from the original passed *mem in SGL ->
+		 * struct scatterlist format.
+		 */
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
+		T_TASK(cmd)->t_task_pt_sgl = mem;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+	return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_get_sectors(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	T_TASK(cmd)->t_tasks_sectors =
+		(cmd->data_length / DEV_ATTRIB(dev)->block_size);
+	if (!(T_TASK(cmd)->t_tasks_sectors))
+		T_TASK(cmd)->t_tasks_sectors = 1;
+
+	if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
+		return 0;
+
+	if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
+	     transport_dev_end_lba(dev)) {
+		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+			" transport_dev_end_lba(): %llu\n",
+			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+			transport_dev_end_lba(dev));
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+		return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
+	}
+
+	return 0;
+}
+
+static int transport_new_cmd_obj(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	u32 task_cdbs = 0, rc;
+
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+		task_cdbs++;
+		T_TASK(cmd)->t_task_cdbs++;
+	} else {
+		int set_counts = 1;
+
+		/*
+		 * Setup any BIDI READ tasks and memory from
+		 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
+		 * are queued first for the non-pSCSI passthrough case.
+		 */
+		if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+		    (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+			rc = transport_generic_get_cdb_count(cmd,
+				T_TASK(cmd)->t_task_lba,
+				T_TASK(cmd)->t_tasks_sectors,
+				DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
+				set_counts);
+			if (!(rc)) {
+				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				return PYX_TRANSPORT_LU_COMM_FAILURE;
+			}
+			set_counts = 0;
+		}
+		/*
+		 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
+		 * Note for BIDI transfers this will contain the WRITE payload
+		 */
+		task_cdbs = transport_generic_get_cdb_count(cmd,
+				T_TASK(cmd)->t_task_lba,
+				T_TASK(cmd)->t_tasks_sectors,
+				cmd->data_direction, T_TASK(cmd)->t_mem_list,
+				set_counts);
+		if (!(task_cdbs)) {
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return PYX_TRANSPORT_LU_COMM_FAILURE;
+		}
+		T_TASK(cmd)->t_task_cdbs += task_cdbs;
+
+#if 0
+		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
+			" %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
+			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+			T_TASK(cmd)->t_task_cdbs);
+#endif
+	}
+
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
+	return 0;
+}
+
+static struct list_head *transport_init_se_mem_list(void)
+{
+	struct list_head *se_mem_list;
+
+	se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!(se_mem_list)) {
+		printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(se_mem_list);
+
+	return se_mem_list;
+}
+
+static int
+transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
+{
+	unsigned char *buf;
+	struct se_mem *se_mem;
+
+	T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+	if (!(T_TASK(cmd)->t_mem_list))
+		return -ENOMEM;
+
+	/*
+	 * If the device uses memory mapping this is enough.
+	 */
+	if (cmd->se_dev->transport->do_se_mem_map)
+		return 0;
+
+	/*
+	 * Setup BIDI-COMMAND READ list of struct se_mem elements
+	 */
+	if (T_TASK(cmd)->t_tasks_bidi) {
+		T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+		if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+			kfree(T_TASK(cmd)->t_mem_list);
+			return -ENOMEM;
+		}
+	}
+
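+	/*
+	 * Carve the requested length into dma_size-sized chunks (PAGE_SIZE
+	 * for the callers above), each struct se_mem backed by a freshly
+	 * zeroed page.
+	 */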
+	while (length) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+		se_mem->se_len = (length > dma_size) ? dma_size : length;
+
+/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
+		se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+		if (!(se_mem->se_page)) {
+			printk(KERN_ERR "alloc_pages() failed\n");
+			goto out;
+		}
+
+		buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
+		if (!(buf)) {
+			printk(KERN_ERR "kmap_atomic() failed\n");
+			goto out;
+		}
+		memset(buf, 0, se_mem->se_len);
+		kunmap_atomic(buf, KM_IRQ0);
+
+		list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
+		T_TASK(cmd)->t_tasks_se_num++;
+
+		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
+			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
+			se_mem->se_off);
+
+		length -= se_mem->se_len;
+	}
+
+	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
+			T_TASK(cmd)->t_tasks_se_num);
+
+	return 0;
+out:
+	return -1;
+}
+
+extern u32 transport_calc_sg_num(
+	struct se_task *task,
+	struct se_mem *in_se_mem,
+	u32 task_offset)
+{
+	struct se_cmd *se_cmd = task->task_se_cmd;
+	struct se_device *se_dev = SE_DEV(se_cmd);
+	struct se_mem *se_mem = in_se_mem;
+	struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
+	u32 sg_length, task_size = task->task_size, task_sg_num_padded;
+
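+	/*
+	 * Walk the struct se_mem list starting at in_se_mem and count how
+	 * many scatterlist entries this task needs; task_offset is the byte
+	 * offset into in_se_mem at which this task's payload begins.
+	 */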
+	while (task_size != 0) {
+		DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
+			" se_mem->se_off(%u) task_offset(%u)\n",
+			se_mem->se_page, se_mem->se_len,
+			se_mem->se_off, task_offset);
+
+		if (task_offset == 0) {
+			if (task_size >= se_mem->se_len) {
+				sg_length = se_mem->se_len;
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list)))
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+			} else {
+				sg_length = task_size;
+				task_size -= sg_length;
+				goto next;
+			}
+
+			DEBUG_SC("sg_length(%u) task_size(%u)\n",
+					sg_length, task_size);
+		} else {
+			if ((se_mem->se_len - task_offset) > task_size) {
+				sg_length = task_size;
+				task_size -= sg_length;
+				goto next;
+			} else {
+				sg_length = (se_mem->se_len - task_offset);
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list)))
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+			}
+
+			DEBUG_SC("sg_length(%u) task_size(%u)\n",
+					sg_length, task_size);
+
+			task_offset = 0;
+		}
+		task_size -= sg_length;
+next:
+		DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
+			task->task_no, task_size);
+
+		task->task_sg_num++;
+	}
+	/*
+	 * Check if the fabric module driver is requesting that all
+	 * struct se_task->task_sg[] be chained together.  If so,
+	 * then allocate an extra padding SG entry for linking and
+	 * marking the end of the chained SGL.
+	 */
+	if (tfo->task_sg_chaining) {
+		task_sg_num_padded = (task->task_sg_num + 1);
+		task->task_padded_sg = 1;
+	} else
+		task_sg_num_padded = task->task_sg_num;
+
+	task->task_sg = kzalloc(task_sg_num_padded *
+			sizeof(struct scatterlist), GFP_KERNEL);
+	if (!(task->task_sg)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" task->task_sg\n");
+		return 0;
+	}
+	sg_init_table(&task->task_sg[0], task_sg_num_padded);
+	/*
+	 * Setup task->task_sg_bidi for SCSI READ payload for
+	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
+	 */
+	if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
+	    (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
+		task->task_sg_bidi = kzalloc(task_sg_num_padded *
+				sizeof(struct scatterlist), GFP_KERNEL);
+		if (!(task->task_sg_bidi)) {
+			printk(KERN_ERR "Unable to allocate memory for"
+				" task->task_sg_bidi\n");
+			return 0;
+		}
+		sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
+	}
+	/*
+	 * For the chaining case, setup the proper end of SGL for the
+	 * initial submission struct task into struct se_subsystem_api.
+	 * This will be cleared later by transport_do_task_sg_chain()
+	 */
+	if (task->task_padded_sg) {
+		sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
+		/*
+		 * Only mark the end of the bi-directional scatterlist when it
+		 * has actually been created, i.e. for (RD + WR) requests.
+		 */
+		if (task->task_sg_bidi)
+			sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
+	}
+
+	DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
+		" task_sg_num_padded(%u)\n", task->task_sg_num,
+		task_sg_num_padded);
+
+	return task->task_sg_num;
+}
+
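+/*
+ * Clamp the number of sectors a single struct se_task may carry.  For
+ * TYPE_DISK the range is first truncated at the end of the device; both
+ * variants then cap the result at DEV_ATTRIB(dev)->max_sectors and flag
+ * *max_sectors_set so the caller knows more tasks are needed.
+ */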
+static inline int transport_set_tasks_sectors_disk(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	if ((lba + sectors) > transport_dev_end_lba(dev)) {
+		task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
+
+		if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
+			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+			*max_sectors_set = 1;
+		}
+	} else {
+		if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+			*max_sectors_set = 1;
+		} else
+			task->task_sectors = sectors;
+	}
+
+	return 0;
+}
+
+static inline int transport_set_tasks_sectors_non_disk(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+		task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+		*max_sectors_set = 1;
+	} else
+		task->task_sectors = sectors;
+
+	return 0;
+}
+
+static inline int transport_set_tasks_sectors(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
+		transport_set_tasks_sectors_disk(task, dev, lba, sectors,
+				max_sectors_set) :
+		transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
+				max_sectors_set);
+}
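+
+/*
+ * Worked example (illustrative only): with DEV_ATTRIB(dev)->max_sectors
+ * set to 256, a request for sectors = 1000 is clamped to
+ * task->task_sectors = 256 and *max_sectors_set = 1, which tells
+ * transport_generic_get_cdb_count() to keep generating struct se_task
+ * descriptors for the remaining 744 sectors.
+ */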
+
+static int transport_map_sg_to_mem(
+	struct se_cmd *cmd,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	u32 *se_mem_cnt)
+{
+	struct se_mem *se_mem;
+	struct scatterlist *sg;
+	u32 sg_count = 1, cmd_size = cmd->data_length;
+
+	if (!in_mem) {
+		printk(KERN_ERR "No source scatterlist\n");
+		return -1;
+	}
+	sg = (struct scatterlist *)in_mem;
+
+	while (cmd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+		DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
+			" sg_page: %p offset: %d length: %d\n", cmd_size,
+			sg_page(sg), sg->offset, sg->length);
+
+		se_mem->se_page = sg_page(sg);
+		se_mem->se_off = sg->offset;
+
+		if (cmd_size > sg->length) {
+			se_mem->se_len = sg->length;
+			sg = sg_next(sg);
+			sg_count++;
+		} else
+			se_mem->se_len = cmd_size;
+
+		cmd_size -= se_mem->se_len;
+
+		DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
+				*se_mem_cnt, cmd_size);
+		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
+				se_mem->se_page, se_mem->se_off, se_mem->se_len);
+
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+	}
+
+	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
+		" struct se_mem\n", sg_count, *se_mem_cnt);
+
+	BUG_ON(sg_count != *se_mem_cnt);
+
+	return 0;
+}
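+
+/*
+ * Worked example (illustrative only): a struct se_cmd with data_length
+ * of 6k backed by two scatterlist entries of 4k and 2k produces two
+ * struct se_mem entries on se_mem_list, with se_len of 4k and 2k
+ * respectively, and returns with *se_mem_cnt = 2.
+ */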
+
+/*	transport_map_mem_to_sg():
+ *
+ *
+ */
+int transport_map_mem_to_sg(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct se_cmd *se_cmd = task->task_se_cmd;
+	struct se_mem *se_mem = in_se_mem;
+	struct scatterlist *sg = (struct scatterlist *)in_mem;
+	u32 task_size = task->task_size, sg_no = 0;
+
+	if (!sg) {
+		printk(KERN_ERR "Unable to locate valid struct"
+				" scatterlist pointer\n");
+		return -1;
+	}
+
+	while (task_size != 0) {
+		/*
+		 * Set up the contiguous array of scatterlists for
+		 * this struct se_task.
+		 */
+		sg_assign_page(sg, se_mem->se_page);
+
+		if (*task_offset == 0) {
+			sg->offset = se_mem->se_off;
+
+			if (task_size >= se_mem->se_len) {
+				sg->length = se_mem->se_len;
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list))) {
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+					(*se_mem_cnt)++;
+				}
+			} else {
+				sg->length = task_size;
+				/*
+				 * Determine if we need to calculate an offset
+				 * into the struct se_mem on the next go around..
+				 */
+				task_size -= sg->length;
+				if (!(task_size))
+					*task_offset = sg->length;
+
+				goto next;
+			}
+
+		} else {
+			sg->offset = (*task_offset + se_mem->se_off);
+
+			if ((se_mem->se_len - *task_offset) > task_size) {
+				sg->length = task_size;
+				/*
+				 * Determine if we need to calculate an offset
+				 * into the struct se_mem on the next go around..
+				 */
+				task_size -= sg->length;
+				if (!(task_size))
+					*task_offset += sg->length;
+
+				goto next;
+			} else {
+				sg->length = (se_mem->se_len - *task_offset);
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list))) {
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+					(*se_mem_cnt)++;
+				}
+			}
+
+			*task_offset = 0;
+		}
+		task_size -= sg->length;
+next:
+		DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
+			" task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
+			sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
+
+		sg_no++;
+		if (!(task_size))
+			break;
+
+		sg = sg_next(sg);
+
+		BUG_ON(task_size > se_cmd->data_length);
+	}
+	*out_se_mem = se_mem;
+
+	DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
+		" SGs\n", task->task_no, *se_mem_cnt, sg_no);
+
+	return 0;
+}
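+
+/*
+ * Worked example (illustrative only): two 4k tasks sharing a single 8k
+ * struct se_mem.  The first call maps bytes 0-4095 and returns with
+ * *task_offset = 4096; the second call then starts its first scatterlist
+ * entry at (se_mem->se_off + 4096) and drains the remaining 4k.
+ */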
+
+/*
+ * This function can be used by HW target mode drivers to create a linked
+ * scatterlist from all contiguously allocated struct se_task->task_sg[].
+ * This is intended to be called during the completion path by TCM Core
+ * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
+ */
+void transport_do_task_sg_chain(struct se_cmd *cmd)
+{
+	struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
+	struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
+	struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+	struct se_task *task;
+	struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
+	u32 task_sg_num = 0, sg_count = 0;
+	int i;
+
+	if (tfo->task_sg_chaining == 0) {
+		printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
+				" %s\n", tfo->get_fabric_name());
+		dump_stack();
+		return;
+	}
+	/*
+	 * Walk the struct se_task list and set up scatterlist chains
+	 * for each contiguously allocated struct se_task->task_sg[].
+	 */
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		if (!(task->task_sg) || !(task->task_padded_sg))
+			continue;
+
+		if (sg_head && sg_link) {
+			sg_head_cur = &task->task_sg[0];
+			sg_link_cur = &task->task_sg[task->task_sg_num];
+			/*
+			 * Either add chain or mark end of scatterlist
+			 */
+			if (!(list_is_last(&task->t_list,
+					&T_TASK(cmd)->t_task_list))) {
+				/*
+				 * Clear existing SGL termination bit set in
+				 * transport_calc_sg_num(), see sg_mark_end()
+				 */
+				sg_end_cur = &task->task_sg[task->task_sg_num - 1];
+				sg_end_cur->page_link &= ~0x02;
+
+				sg_chain(sg_head, task_sg_num, sg_head_cur);
+				sg_count += (task->task_sg_num + 1);
+			} else
+				sg_count += task->task_sg_num;
+
+			sg_head = sg_head_cur;
+			sg_link = sg_link_cur;
+			task_sg_num = task->task_sg_num;
+			continue;
+		}
+		sg_head = sg_first = &task->task_sg[0];
+		sg_link = &task->task_sg[task->task_sg_num];
+		task_sg_num = task->task_sg_num;
+		/*
+		 * Check for single task..
+		 */
+		if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
+			/*
+			 * Clear existing SGL termination bit set in
+			 * transport_calc_sg_num(), see sg_mark_end()
+			 */
+			sg_end = &task->task_sg[task->task_sg_num - 1];
+			sg_end->page_link &= ~0x02;
+			sg_count += (task->task_sg_num + 1);
+		} else
+			sg_count += task->task_sg_num;
+	}
+	/*
+	 * Set up the starting pointer and total t_tasks_sg_chained_no including
+	 * padding SGs for linking and to mark the end.
+	 */
+	T_TASK(cmd)->t_tasks_sg_chained = sg_first;
+	T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+
+	DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+		T_TASK(cmd)->t_tasks_sg_chained_no);
+
+	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
+			T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+
+		DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
+			sg, sg_page(sg), sg->length, sg->offset);
+		if (sg_is_chain(sg))
+			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+		if (sg_is_last(sg))
+			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+	}
+
+}
+EXPORT_SYMBOL(transport_do_task_sg_chain);
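+
+/*
+ * Illustrative sketch (hypothetical fabric module, not part of this
+ * patch): a HW target driver with tfo->task_sg_chaining enabled might
+ * build its DMA mapping from the chained SGL in its completion path:
+ *
+ *	static void example_queue_data_in(struct se_cmd *se_cmd)
+ *	{
+ *		transport_do_task_sg_chain(se_cmd);
+ *		dma_map_sg(hw->dev, T_TASK(se_cmd)->t_tasks_sg_chained,
+ *			   T_TASK(se_cmd)->t_tasks_sg_chained_no,
+ *			   DMA_TO_DEVICE);
+ *		...
+ *	}
+ *
+ * where hw->dev is this hypothetical driver's struct device.
+ */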
+
+static int transport_do_se_mem_map(
+	struct se_device *dev,
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset_in)
+{
+	u32 task_offset = *task_offset_in;
+	int ret = 0;
+	/*
+	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
+	 * has been done by the transport plugin.
+	 */
+	if (TRANSPORT(dev)->do_se_mem_map) {
+		ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
+				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
+				task_offset_in);
+		if (ret == 0)
+			T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+
+		return ret;
+	}
+	/*
+	 * This is the normal path for all normal non-BIDI and BIDI-COMMAND
+	 * WRITE payloads.  If we need to do BIDI READ passthrough for
+	 * TCM/pSCSI, the first call to transport_do_se_mem_map() ->
+	 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
+	 * allocation for task->task_sg_bidi, and the subsequent call to
+	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
+	 * will perform the BIDI READ mapping below.
+	 */
+	if (!(task->task_sg_bidi)) {
+		/*
+		 * Assume by default that the transport plugin speaks
+		 * preallocated scatterlists.
+		 */
+		if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
+			return -1;
+		/*
+		 * struct se_task->task_sg now contains the struct scatterlist array.
+		 */
+		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+					in_se_mem, out_se_mem, se_mem_cnt,
+					task_offset_in);
+	}
+	/*
+	 * Handle the se_mem_list -> struct task->task_sg_bidi
+	 * memory map for the extra BIDI READ payload
+	 */
+	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
+				in_se_mem, out_se_mem, se_mem_cnt,
+				task_offset_in);
+}
+
+static u32 transport_generic_get_cdb_count(
+	struct se_cmd *cmd,
+	unsigned long long lba,
+	u32 sectors,
+	enum dma_data_direction data_direction,
+	struct list_head *mem_list,
+	int set_counts)
+{
+	unsigned char *cdb = NULL;
+	struct se_task *task;
+	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
+	struct se_device *dev = SE_DEV(cmd);
+	int max_sectors_set = 0, ret;
+	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
+
+	if (!mem_list) {
+		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
+				"_cdb_count()\n");
+		return 0;
+	}
+	/*
+	 * Using RAMDISK_DR backstores is the only case where
+	 * mem_list will be empty at this point.
+	 */
+	if (!(list_empty(mem_list)))
+		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
+	/*
+	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
+	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
+	 */
+	if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+	    !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
+	    (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
+		se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
+					struct se_mem, se_list);
+
+	while (sectors) {
+		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
+			transport_dev_end_lba(dev));
+
+		task = transport_generic_get_task(cmd, data_direction);
+		if (!(task))
+			goto out;
+
+		transport_set_tasks_sectors(task, dev, lba, sectors,
+				&max_sectors_set);
+
+		task->task_lba = lba;
+		lba += task->task_sectors;
+		sectors -= task->task_sectors;
+		task->task_size = (task->task_sectors *
+				   DEV_ATTRIB(dev)->block_size);
+
+		cdb = TRANSPORT(dev)->get_cdb(task);
+		if (cdb) {
+			memcpy(cdb, T_TASK(cmd)->t_task_cdb,
+				scsi_command_size(T_TASK(cmd)->t_task_cdb));
+			cmd->transport_split_cdb(task->task_lba,
+					&task->task_sectors, cdb);
+		}
+
+		/*
+		 * Perform the SE OBJ plugin and/or Transport plugin specific
+		 * mapping for T_TASK(cmd)->t_mem_list. And setup the
+		 * task->task_sg and if necessary task->task_sg_bidi
+		 */
+		ret = transport_do_se_mem_map(dev, task, mem_list,
+				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
+				&task_offset_in);
+		if (ret < 0)
+			goto out;
+
+		se_mem = se_mem_lout;
+		/*
+		 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
+		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
+		 *
+		 * Note that the first call to transport_do_se_mem_map() above will
+		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
+		 * -> transport_calc_sg_num(), and the second here will do the
+		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
+		 */
+		if (task->task_sg_bidi != NULL) {
+			ret = transport_do_se_mem_map(dev, task,
+				T_TASK(cmd)->t_mem_bidi_list, NULL,
+				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
+				&task_offset_in);
+			if (ret < 0)
+				goto out;
+
+			se_mem_bidi = se_mem_bidi_lout;
+		}
+		task_cdbs++;
+
+		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
+				task_cdbs, task->task_sg_num);
+
+		if (max_sectors_set) {
+			max_sectors_set = 0;
+			continue;
+		}
+
+		if (!sectors)
+			break;
+	}
+
+	if (set_counts) {
+		atomic_inc(&T_TASK(cmd)->t_fe_count);
+		atomic_inc(&T_TASK(cmd)->t_se_count);
+	}
+
+	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
+		CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
+		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+
+	return task_cdbs;
+out:
+	return 0;
+}
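+
+/*
+ * Worked example (illustrative only): a 1024 sector WRITE against a
+ * device with max_sectors = 256 is split into four struct se_task
+ * descriptors, each carrying its own LBA range and, for pSCSI
+ * passthrough, a CDB rebuilt by cmd->transport_split_cdb().
+ */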
+
+static int
+transport_map_control_cmd_to_task(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *cdb;
+	struct se_task *task;
+	int ret;
+
+	task = transport_generic_get_task(cmd, cmd->data_direction);
+	if (!task)
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+	cdb = TRANSPORT(dev)->get_cdb(task);
+	if (cdb)
+		memcpy(cdb, cmd->t_task->t_task_cdb,
+			scsi_command_size(cmd->t_task->t_task_cdb));
+
+	task->task_size = cmd->data_length;
+	task->task_sg_num =
+		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+
+	atomic_inc(&cmd->t_task->t_fe_count);
+	atomic_inc(&cmd->t_task->t_se_count);
+
+	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
+		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+		u32 se_mem_cnt = 0, task_offset = 0;
+
+		BUG_ON(list_empty(cmd->t_task->t_mem_list));
+
+		ret = transport_do_se_mem_map(dev, task,
+				cmd->t_task->t_mem_list, NULL, se_mem,
+				&se_mem_lout, &se_mem_cnt, &task_offset);
+		if (ret < 0)
+			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+		if (dev->transport->map_task_SG)
+			return dev->transport->map_task_SG(task);
+		return 0;
+	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+		if (dev->transport->map_task_non_SG)
+			return dev->transport->map_task_non_SG(task);
+		return 0;
+	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+		if (dev->transport->cdb_none)
+			return dev->transport->cdb_none(task);
+		return 0;
+	} else {
+		BUG();
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	}
+}
+
+/*	transport_generic_new_cmd(): Called from transport_processing_thread()
+ *
+ *	Allocate storage transport resources from a set of values predefined
+ *	by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
+ *	Any non-zero return here is treated as an "out of resource" failure.
+ *
+ *	Generates struct se_task(s) and/or their payloads for this CDB.
+ */
+static int transport_generic_new_cmd(struct se_cmd *cmd)
+{
+	struct se_portal_group *se_tpg;
+	struct se_task *task;
+	struct se_device *dev = SE_DEV(cmd);
+	int ret = 0;
+
+	/*
+	 * Determine if the TCM fabric module has already allocated physical
+	 * memory and is directly calling transport_generic_map_mem_to_cmd()
+	 * to set up beforehand the linked list of physical memory at
+	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page.
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+		ret = transport_allocate_resources(cmd);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = transport_get_sectors(cmd);
+	if (ret < 0)
+		return ret;
+
+	ret = transport_new_cmd_obj(cmd);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Determine if the calling TCM fabric module is talking to
+	 * Linux/NET via kernel sockets and needs to allocate a
+	 * struct iovec array to complete the struct se_cmd
+	 */
+	se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
+	if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
+		ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
+		if (ret < 0)
+			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	}
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+			if (atomic_read(&task->task_sent))
+				continue;
+			if (!dev->transport->map_task_SG)
+				continue;
+
+			ret = dev->transport->map_task_SG(task);
+			if (ret < 0)
+				return ret;
+		}
+	} else {
+		ret = transport_map_control_cmd_to_task(cmd);
+		if (ret < 0)
+			return ret;
+	}
+
+	/*
+	 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.
+	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
+	 * will be added to the struct se_device execution queue after its WRITE
+	 * data has arrived (i.e. it gets handled by the transport processing
+	 * thread a second time).
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		transport_add_tasks_to_state_queue(cmd);
+		return transport_generic_write_pending(cmd);
+	}
+	/*
+	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
+	 * to the execution queue.
+	 */
+	transport_execute_tasks(cmd);
+	return 0;
+}
+
+/*	transport_generic_process_write():
+ *
+ *
+ */
+void transport_generic_process_write(struct se_cmd *cmd)
+{
+#if 0
+	/*
+	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
+	 * original EDTL
+	 */
+	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+		if (!T_TASK(cmd)->t_tasks_se_num) {
+			unsigned char *dst, *buf =
+				(unsigned char *)T_TASK(cmd)->t_task_buf;
+
+			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
+			if (!(dst)) {
+				printk(KERN_ERR "Unable to allocate memory for"
+						" WRITE underflow\n");
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				return;
+			}
+			memcpy(dst, buf, cmd->cmd_spdtl);
+
+			kfree(T_TASK(cmd)->t_task_buf);
+			T_TASK(cmd)->t_task_buf = dst;
+		} else {
+			struct scatterlist *sg =
+				(struct scatterlist *)T_TASK(cmd)->t_task_buf;
+			struct scatterlist *orig_sg;
+
+			orig_sg = kzalloc(sizeof(struct scatterlist) *
+					T_TASK(cmd)->t_tasks_se_num,
+					GFP_KERNEL);
+			if (!(orig_sg)) {
+				printk(KERN_ERR "Unable to allocate memory"
+						" for WRITE underflow\n");
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				return;
+			}
+
+			memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
+					sizeof(struct scatterlist) *
+					T_TASK(cmd)->t_tasks_se_num);
+
+			cmd->data_length = cmd->cmd_spdtl;
+			/*
+			 * FIXME, clear out original struct se_task and state
+			 * information.
+			 */
+			if (transport_generic_new_cmd(cmd) < 0) {
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				kfree(orig_sg);
+				return;
+			}
+
+			transport_memcpy_write_sg(cmd, orig_sg);
+		}
+	}
+#endif
+	transport_execute_tasks(cmd);
+}
+EXPORT_SYMBOL(transport_generic_process_write);
+
+/*	transport_generic_write_pending():
+ *
+ *
+ */
+static int transport_generic_write_pending(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	cmd->t_state = TRANSPORT_WRITE_PENDING;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
+	 * from the passed Linux/SCSI struct scatterlist located at
+	 * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at
+	 * T_TASK(se_cmd)->t_task_buf.
+	 */
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+		transport_memcpy_read_contig(cmd,
+				T_TASK(cmd)->t_task_buf,
+				T_TASK(cmd)->t_task_pt_sgl);
+	/*
+	 * Clear the se_cmd for WRITE_PENDING status in order to set
+	 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+	 * can be called from HW target mode interrupt code.  This is safe
+	 * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
+	 * because the se_cmd->se_lun pointer is not being cleared.
+	 */
+	transport_cmd_check_stop(cmd, 1, 0);
+
+	/*
+	 * Call the fabric write_pending function here to let the
+	 * frontend know that WRITE buffers are ready.
+	 */
+	ret = CMD_TFO(cmd)->write_pending(cmd);
+	if (ret < 0)
+		return ret;
+
+	return PYX_TRANSPORT_WRITE_PENDING;
+}
+
+/*	transport_release_cmd_to_pool():
+ *
+ *
+ */
+void transport_release_cmd_to_pool(struct se_cmd *cmd)
+{
+	BUG_ON(!T_TASK(cmd));
+	BUG_ON(!CMD_TFO(cmd));
+
+	transport_free_se_cmd(cmd);
+	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+}
+EXPORT_SYMBOL(transport_release_cmd_to_pool);
+
+/*	transport_generic_free_cmd():
+ *
+ *	Called from processing frontend to release storage engine resources
+ */
+void transport_generic_free_cmd(
+	struct se_cmd *cmd,
+	int wait_for_tasks,
+	int release_to_pool,
+	int session_reinstatement)
+{
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
+		transport_release_cmd_to_pool(cmd);
+	else {
+		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
+
+		if (SE_LUN(cmd)) {
+#if 0
+			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
+				" SE_LUN(cmd)\n", cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+			transport_lun_remove_cmd(cmd);
+		}
+
+		if (wait_for_tasks && cmd->transport_wait_for_tasks)
+			cmd->transport_wait_for_tasks(cmd, 0, 0);
+
+		transport_generic_remove(cmd, release_to_pool,
+				session_reinstatement);
+	}
+}
+EXPORT_SYMBOL(transport_generic_free_cmd);
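+
+/*
+ * Illustrative sketch (hypothetical fabric caller): a fabric module
+ * releasing a command after queueing its response would typically call
+ *
+ *	transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ *
+ * i.e. without waiting for outstanding tasks, releasing back to the
+ * fabric pool, and without session reinstatement.
+ */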
+
+static void transport_nop_wait_for_tasks(
+	struct se_cmd *cmd,
+	int remove_cmd,
+	int session_reinstatement)
+{
+	return;
+}
+
+/*	transport_lun_wait_for_tasks():
+ *
+ *	Called from ConfigFS context to stop the passed struct se_cmd to allow
+ *	a struct se_lun to be successfully shut down.
+ */
+static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
+{
+	unsigned long flags;
+	int ret;
+	/*
+	 * If the frontend has already requested this struct se_cmd to
+	 * be stopped, we can safely ignore this struct se_cmd.
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+			" TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		transport_cmd_check_stop(cmd, 1, 0);
+		return -1;
+	}
+	atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+	ret = transport_stop_tasks_for_cmd(cmd);
+
+	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
+			" %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+	if (!ret) {
+		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+				CMD_TFO(cmd)->get_task_tag(cmd));
+		wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+				CMD_TFO(cmd)->get_task_tag(cmd));
+	}
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+	return 0;
+}
+
+/* #define DEBUG_CLEAR_LUN */
+#ifdef DEBUG_CLEAR_LUN
+#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CLEAR_L(x...)
+#endif
+
+static void __transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct se_cmd *cmd = NULL;
+	unsigned long lun_flags, cmd_flags;
+	/*
+	 * Do exception processing and return CHECK_CONDITION status to the
+	 * Initiator Port.
+	 */
+	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	while (!list_empty_careful(&lun->lun_cmd_list)) {
+		cmd = list_entry(lun->lun_cmd_list.next,
+			struct se_cmd, se_lun_list);
+		list_del(&cmd->se_lun_list);
+
+		if (!(T_TASK(cmd))) {
+			printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
+				"[i,t]_state: %u/%u\n",
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+			BUG();
+		}
+		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+		/*
+		 * This will notify iscsi_target_transport.c:
+		 * transport_cmd_check_stop() that a LUN shutdown is in
+		 * progress for the iscsi_cmd_t.
+		 */
+		spin_lock(&T_TASK(cmd)->t_state_lock);
+		DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+			"_lun_stop for  ITT: 0x%08x\n",
+			SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
+		spin_unlock(&T_TASK(cmd)->t_state_lock);
+
+		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+
+		if (!(SE_LUN(cmd))) {
+			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+			BUG();
+		}
+		/*
+		 * If the Storage engine still owns the iscsi_cmd_t, determine
+		 * and/or stop its context.
+		 */
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+			"_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+			"_wait_for_tasks(): SUCCESS\n",
+			SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+			goto check_cond;
+		}
+		atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+		transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+
+		transport_free_dev_tasks(cmd);
+		/*
+		 * The Storage engine stopped this struct se_cmd before it was
+		 * sent to the fabric frontend for delivery back to the
+		 * Initiator Node.  Return this SCSI CDB back with a
+		 * CHECK_CONDITION status.
+		 */
+check_cond:
+		transport_send_check_condition_and_sense(cmd,
+				TCM_NON_EXISTENT_LUN, 0);
+		/*
+		 * If the fabric frontend is waiting for this iscsi_cmd_t to
+		 * be released, notify the waiting thread now that the LUN has
+		 * finished accessing it.
+		 */
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
+			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+				" struct se_cmd: %p ITT: 0x%08x\n",
+				lun->unpacked_lun,
+				cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					cmd_flags);
+			transport_cmd_check_stop(cmd, 1, 0);
+			complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+			lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	}
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+}
+
+static int transport_clear_lun_thread(void *p)
+{
+	struct se_lun *lun = (struct se_lun *)p;
+
+	__transport_clear_lun_from_sessions(lun);
+	complete(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+int transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct task_struct *kt;
+
+	kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+			"tcm_cl_%u", lun->unpacked_lun);
+	if (IS_ERR(kt)) {
+		printk(KERN_ERR "Unable to start clear_lun thread\n");
+		return -1;
+	}
+	wait_for_completion(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+/*	transport_generic_wait_for_tasks():
+ *
+ *	Called from frontend or passthrough context to wait for storage engine
+ *	to pause and/or release frontend generated struct se_cmd.
+ */
+static void transport_generic_wait_for_tasks(
+	struct se_cmd *cmd,
+	int remove_cmd,
+	int session_reinstatement)
+{
+	unsigned long flags;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * If we are already stopped due to an external event (i.e. LUN shutdown),
+	 * sleep until the connection can have the passed struct se_cmd back.
+	 * The T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by
+	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
+	 * has completed its operation on the struct se_cmd.
+	 */
+	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+
+		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+			" wait_for_completion(&T_TASK(cmd)transport_lun_fe"
+			"_stop_comp); for ITT: 0x%08x\n",
+			CMD_TFO(cmd)->get_task_tag(cmd));
+		/*
+		 * There is a special case for WRITES where a FE exception +
+		 * LUN shutdown means ConfigFS context is still sleeping on
+		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
+		 * We go ahead and complete transport_lun_stop_comp just to
+		 * be sure here.
+		 */
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		complete(&T_TASK(cmd)->transport_lun_stop_comp);
+		wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_all_task_dev_remove_state(cmd);
+		/*
+		 * At this point, the frontend who was the originator of this
+		 * struct se_cmd, now owns the structure and can be released through
+		 * normal means below.
+		 */
+		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+			" wait_for_completion(&T_TASK(cmd)transport_lun_fe_"
+			"stop_comp); for ITT: 0x%08x\n",
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+	}
+	if (!atomic_read(&T_TASK(cmd)->t_transport_active))
+		goto remove;
+
+	atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+
+	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
+		" = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+		CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+		cmd->deferred_t_state);
+
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+	wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+	atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+
+	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
+		"&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
+		CMD_TFO(cmd)->get_task_tag(cmd));
+remove:
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	if (!remove_cmd)
+		return;
+
+	transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+}
+
+static int transport_get_sense_codes(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	*asc = cmd->scsi_asc;
+	*ascq = cmd->scsi_ascq;
+
+	return 0;
+}
+
+static int transport_set_sense_codes(
+	struct se_cmd *cmd,
+	u8 asc,
+	u8 ascq)
+{
+	cmd->scsi_asc = asc;
+	cmd->scsi_ascq = ascq;
+
+	return 0;
+}
+
+int transport_send_check_condition_and_sense(
+	struct se_cmd *cmd,
+	u8 reason,
+	int from_transport)
+{
+	unsigned char *buffer = cmd->sense_buffer;
+	unsigned long flags;
+	int offset;
+	u8 asc = 0, ascq = 0;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return 0;
+	}
+	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	if (!reason && from_transport)
+		goto after_reason;
+
+	if (!from_transport)
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+	/*
+	 * Data Segment and SenseLength of the fabric response PDU.
+	 *
+	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
+	 * from include/scsi/scsi_cmnd.h
+	 */
+	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+	/*
+	 * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
+	 * SENSE KEY values from include/scsi/scsi.h
+	 */
+	switch (reason) {
+	case TCM_NON_EXISTENT_LUN:
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_SECTOR_COUNT_TOO_MANY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID COMMAND OPERATION CODE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
+		break;
+	case TCM_UNKNOWN_MODE_PAGE:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* BUS DEVICE RESET FUNCTION OCCURRED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
+		break;
+	case TCM_INCORRECT_AMOUNT_OF_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* NOT ENOUGH UNSOLICITED DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
+		break;
+	case TCM_INVALID_CDB_FIELD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_INVALID_PARAMETER_LIST:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* INVALID FIELD IN PARAMETER LIST */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+		break;
+	case TCM_UNEXPECTED_UNSOLICITED_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* UNEXPECTED_UNSOLICITED_DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
+		break;
+	case TCM_SERVICE_CRC_ERROR:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* PROTOCOL SERVICE CRC ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
+		/* N/A */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
+		break;
+	case TCM_SNACK_REJECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* READ ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
+		/* FAILED RETRANSMISSION REQUEST */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
+		break;
+	case TCM_WRITE_PROTECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* DATA PROTECT */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+		/* WRITE PROTECTED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+		break;
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* UNIT ATTENTION */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_CHECK_CONDITION_NOT_READY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* Not Ready */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+		transport_get_sense_codes(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	default:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL UNIT COMMUNICATION FAILURE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
+		break;
+	}
+	/*
+	 * This code uses linux/include/scsi/scsi.h SAM status codes!
+	 */
+	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+	/*
+	 * Automatically padded, this value is encoded in the fabric's
+	 * data_length response PDU containing the SCSI defined sense data.
+	 */
+	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
+
+after_reason:
+	CMD_TFO(cmd)->queue_status(cmd);
+	return 0;
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
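+
+/*
+ * Usage example (illustrative only): a caller that detects a malformed
+ * CDB can queue CHECK_CONDITION with ILLEGAL REQUEST / INVALID FIELD IN
+ * CDB sense data via
+ *
+ *	transport_send_check_condition_and_sense(cmd,
+ *			TCM_INVALID_CDB_FIELD, 0);
+ */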
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+	int ret = 0;
+
+	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
+		if (!(send_status) ||
+		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+			return 1;
+#if 0
+		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+			" status for CDB: 0x%02x ITT: 0x%08x\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+		CMD_TFO(cmd)->queue_status(cmd);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(transport_check_aborted_status);
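+
+/*
+ * Illustrative sketch (hypothetical fabric module): before queueing a
+ * response, a fabric driver can honor a pending abort with
+ *
+ *	if (transport_check_aborted_status(se_cmd, 1) != 0)
+ *		return 0;  (TASK_ABORTED status has been queued)
+ */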
+
+void transport_send_task_abort(struct se_cmd *cmd)
+{
+	/*
+	 * If there are still expected incoming fabric WRITEs, we wait
+	 * until they have completed before sending a TASK_ABORTED
+	 * response.  This response with TASK_ABORTED status will be
+	 * queued back to fabric module by transport_check_aborted_status().
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
+			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+			smp_mb__after_atomic_inc();
+			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+			transport_new_cmd_failure(cmd);
+			return;
+		}
+	}
+	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+#if 0
+	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
+		CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+	CMD_TFO(cmd)->queue_status(cmd);
+}
+
+/*	transport_generic_do_tmr():
+ *
+ *
+ */
+int transport_generic_do_tmr(struct se_cmd *cmd)
+{
+	struct se_cmd *ref_cmd;
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_tmr_req *tmr = cmd->se_tmr_req;
+	int ret;
+
+	switch (tmr->function) {
+	case ABORT_TASK:
+		ref_cmd = tmr->ref_cmd;
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case ABORT_TASK_SET:
+	case CLEAR_ACA:
+	case CLEAR_TASK_SET:
+		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+		break;
+	case LUN_RESET:
+		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
+		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
+					 TMR_FUNCTION_REJECTED;
+		break;
+#if 0
+	case TARGET_WARM_RESET:
+		transport_generic_host_reset(dev->se_hba);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case TARGET_COLD_RESET:
+		transport_generic_host_reset(dev->se_hba);
+		transport_generic_cold_reset(dev->se_hba);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+#endif
+	default:
+		printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+				tmr->function);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	}
+
+	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+	CMD_TFO(cmd)->queue_tm_rsp(cmd);
+
+	transport_cmd_check_stop(cmd, 2, 0);
+	return 0;
+}
+
+/*
+ *	Called with spin_lock_irq(&dev->execute_task_lock); held
+ *
+ */
+static struct se_task *
+transport_get_task_from_state_list(struct se_device *dev)
+{
+	struct se_task *task;
+
+	if (list_empty(&dev->state_task_list))
+		return NULL;
+
+	task = list_first_entry(&dev->state_task_list,
+				struct se_task, t_state_list);
+
+	list_del(&task->t_state_list);
+	atomic_set(&task->task_state_active, 0);
+
+	return task;
+}
+
+static void transport_processing_shutdown(struct se_device *dev)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr;
+	struct se_task *task;
+	u8 state;
+	unsigned long flags;
+	/*
+	 * Empty the struct se_device's struct se_task state list.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	while ((task = transport_get_task_from_state_list(dev))) {
+		if (!(TASK_CMD(task))) {
+			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+			continue;
+		}
+		cmd = TASK_CMD(task);
+
+		if (!T_TASK(cmd)) {
+			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+				" %p ITT: 0x%08x\n", task, cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+			continue;
+		}
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
+			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
+			" %d/%d cdb: 0x%02x\n", cmd, task,
+			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
+			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+			cmd->t_state, cmd->deferred_t_state,
+			T_TASK(cmd)->t_task_cdb[0]);
+		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
+			" t_transport_stop: %d t_transport_sent: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			T_TASK(cmd)->t_task_cdbs,
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+			atomic_read(&T_TASK(cmd)->t_transport_active),
+			atomic_read(&T_TASK(cmd)->t_transport_stop),
+			atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+				" %p\n", task, dev);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+				task, dev);
+
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		}
+		__transport_stop_task_timer(task, &flags);
+
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+			spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_DO("Skipping task: %p, dev: %p for"
+				" t_task_cdbs_ex_left: %d\n", task, dev,
+				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+
+		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+					" %p\n", task, dev);
+
+			if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+				transport_send_check_condition_and_sense(
+					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
+					0);
+				transport_remove_cmd_from_queue(cmd,
+					SE_DEV(cmd)->dev_queue_obj);
+
+				transport_lun_remove_cmd(cmd);
+				transport_cmd_check_stop(cmd, 1, 0);
+			} else {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+				transport_remove_cmd_from_queue(cmd,
+					SE_DEV(cmd)->dev_queue_obj);
+
+				transport_lun_remove_cmd(cmd);
+
+				if (transport_cmd_check_stop(cmd, 1, 0))
+					transport_generic_remove(cmd, 0, 0);
+			}
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+				task, dev);
+
+		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+			transport_send_check_condition_and_sense(cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+			transport_remove_cmd_from_queue(cmd,
+				SE_DEV(cmd)->dev_queue_obj);
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop(cmd, 1, 0);
+		} else {
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			transport_remove_cmd_from_queue(cmd,
+				SE_DEV(cmd)->dev_queue_obj);
+			transport_lun_remove_cmd(cmd);
+
+			if (transport_cmd_check_stop(cmd, 1, 0))
+				transport_generic_remove(cmd, 0, 0);
+		}
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	/*
+	 * Empty the struct se_device's struct se_cmd list.
+	 */
+	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
+		spin_unlock_irqrestore(
+				&dev->dev_queue_obj->cmd_queue_lock, flags);
+		cmd = (struct se_cmd *)qr->cmd;
+		state = qr->state;
+		kfree(qr);
+
+		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+				cmd, state);
+
+		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+			transport_send_check_condition_and_sense(cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop(cmd, 1, 0);
+		} else {
+			transport_lun_remove_cmd(cmd);
+			if (transport_cmd_check_stop(cmd, 1, 0))
+				transport_generic_remove(cmd, 0, 0);
+		}
+		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+}
+
+/*	transport_processing_thread():
+ *
+ *
+ */
+static int transport_processing_thread(void *param)
+{
+	int ret, t_state;
+	struct se_cmd *cmd;
+	struct se_device *dev = (struct se_device *) param;
+	struct se_queue_req *qr;
+
+	set_user_nice(current, -20);
+
+	while (!kthread_should_stop()) {
+		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
+				atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+				kthread_should_stop());
+		if (ret < 0)
+			goto out;
+
+		spin_lock_irq(&dev->dev_status_lock);
+		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
+			spin_unlock_irq(&dev->dev_status_lock);
+			transport_processing_shutdown(dev);
+			continue;
+		}
+		spin_unlock_irq(&dev->dev_status_lock);
+
+get_cmd:
+		__transport_execute_tasks(dev);
+
+		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
+		if (!(qr))
+			continue;
+
+		cmd = (struct se_cmd *)qr->cmd;
+		t_state = qr->state;
+		kfree(qr);
+
+		switch (t_state) {
+		case TRANSPORT_NEW_CMD_MAP:
+			if (!(CMD_TFO(cmd)->new_cmd_map)) {
+				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+					" NULL for TRANSPORT_NEW_CMD_MAP\n");
+				BUG();
+			}
+			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+			if (ret < 0) {
+				cmd->transport_error_status = ret;
+				transport_generic_request_failure(cmd, NULL,
+						0, (cmd->data_direction !=
+						    DMA_TO_DEVICE));
+				break;
+			}
+			/* Fall through */
+		case TRANSPORT_NEW_CMD:
+			ret = transport_generic_new_cmd(cmd);
+			if (ret < 0) {
+				cmd->transport_error_status = ret;
+				transport_generic_request_failure(cmd, NULL,
+					0, (cmd->data_direction !=
+					 DMA_TO_DEVICE));
+			}
+			break;
+		case TRANSPORT_PROCESS_WRITE:
+			transport_generic_process_write(cmd);
+			break;
+		case TRANSPORT_COMPLETE_OK:
+			transport_stop_all_task_timers(cmd);
+			transport_generic_complete_ok(cmd);
+			break;
+		case TRANSPORT_REMOVE:
+			transport_generic_remove(cmd, 1, 0);
+			break;
+		case TRANSPORT_PROCESS_TMR:
+			transport_generic_do_tmr(cmd);
+			break;
+		case TRANSPORT_COMPLETE_FAILURE:
+			transport_generic_request_failure(cmd, NULL, 1, 1);
+			break;
+		case TRANSPORT_COMPLETE_TIMEOUT:
+			transport_stop_all_task_timers(cmd);
+			transport_generic_request_timeout(cmd);
+			break;
+		default:
+			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
+				" %u\n", t_state, cmd->deferred_t_state,
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd),
+				SE_LUN(cmd)->unpacked_lun);
+			BUG();
+		}
+
+		goto get_cmd;
+	}
+
+out:
+	transport_release_all_cmds(dev);
+	dev->process_thread = NULL;
+	return 0;
+}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 0000000..a2ef346
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,332 @@
+/*******************************************************************************
+ * Filename: target_core_ua.c
+ *
+ * This file contains logic for SPC-3 Unit Attention emulation
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+int core_scsi3_ua_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+
+	if (!(sess))
+		return 0;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return 0;
+
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count)))
+		return 0;
+	/*
+	 * From sam4r14, section 5.14 Unit attention condition:
+	 *
+	 * a) if an INQUIRY command enters the enabled command state, the
+	 *    device server shall process the INQUIRY command and shall neither
+	 *    report nor clear any unit attention condition;
+	 * b) if a REPORT LUNS command enters the enabled command state, the
+	 *    device server shall process the REPORT LUNS command and shall not
+	 *    report any unit attention condition;
+	 * e) if a REQUEST SENSE command enters the enabled command state while
+	 *    a unit attention condition exists for the SCSI initiator port
+	 *    associated with the I_T nexus on which the REQUEST SENSE command
+	 *    was received, then the device server shall process the command
+	 *    and either:
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		return 0;
+	default:
+		return -1;
+	}
+
+	return -1;
+}
+
+int core_scsi3_ua_allocate(
+	struct se_node_acl *nacl,
+	u32 unpacked_lun,
+	u8 asc,
+	u8 ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_ua *ua, *ua_p, *ua_tmp;
+	/*
+	 * PASSTHROUGH OPS: nacl is NULL for internal passthrough ops,
+	 * which do not track Unit Attention conditions.
+	 */
+	if (!(nacl))
+		return -1;
+
+	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+	if (!(ua)) {
+		printk(KERN_ERR "Unable to allocate struct se_ua\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&ua->ua_dev_list);
+	INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+	ua->ua_nacl = nacl;
+	ua->ua_asc = asc;
+	ua->ua_ascq = ascq;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[unpacked_lun];
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * Do not report the same UNIT ATTENTION twice..
+		 */
+		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+			spin_unlock(&deve->ua_lock);
+			spin_unlock_irq(&nacl->device_list_lock);
+			kmem_cache_free(se_ua_cache, ua);
+			return 0;
+		}
+		/*
+		 * Attach the highest priority Unit Attention to
+		 * the head of the list following sam4r14,
+		 * Section 5.14 Unit Attention Condition:
+		 *
+		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+		 * POWER ON OCCURRED or
+		 * DEVICE INTERNAL RESET
+		 * SCSI BUS RESET OCCURRED or
+		 * MICROCODE HAS BEEN CHANGED or
+		 * protocol specific
+		 * BUS DEVICE RESET FUNCTION OCCURRED
+		 * I_T NEXUS LOSS OCCURRED
+		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+		 * all others                                    Lowest
+		 *
+		 * Each of the ASCQ codes listed above are defined in
+		 * the 29h ASC family, see spc4r17 Table D.1
+		 */
+		if (ua_p->ua_asc == 0x29) {
+			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+						&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else if (ua_p->ua_asc == 0x2a) {
+			/*
+			 * Incoming Family 29h ASCQ codes will override
+			 * Family 2Ah ASCQ codes for Unit Attention condition.
+			 */
+			if ((asc == 0x29) || (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+					&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else
+			list_add_tail(&ua->ua_nacl_list,
+				&deve->ua_list);
+		spin_unlock(&deve->ua_lock);
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		atomic_inc(&deve->ua_count);
+		smp_mb__after_atomic_inc();
+		return 0;
+	}
+	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+		" 0x%02x, ASCQ: 0x%02x\n",
+		TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+		asc, ascq);
+
+	atomic_inc(&deve->ua_count);
+	smp_mb__after_atomic_inc();
+	return 0;
+}
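+
+/*
+ * Usage example (illustrative only; mapped_lun stands for the mapped
+ * LUN seen by the initiator port): SPC-3 PR emulation reporting
+ * RESERVATIONS RELEASED to a registered initiator port would call
+ *
+ *	core_scsi3_ua_allocate(nacl, mapped_lun, 0x2a,
+ *			ASCQ_2AH_RESERVATIONS_RELEASED);
+ */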
+
+void core_scsi3_ua_release_all(
+	struct se_dev_entry *deve)
+{
+	struct se_ua *ua, *ua_p;
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+}
+
+void core_scsi3_ua_for_check_condition(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!(sess))
+		return;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count))) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
+	 * sense data for the received CDB.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * For ua_intlck_ctrl code not equal to 00b, only report the
+		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
+		 * clearing it.
+		 */
+		if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			break;
+		}
+		/*
+		 * Otherwise for the default 00b, release the UNIT ATTENTION
+		 * condition.  Return the ASC/ASCQ of the highest priority UA
+		 * (head of the list) in the outgoing CHECK_CONDITION + sense.
+		 */
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
+		TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+		(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+		"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
+		cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+}
+
+int core_scsi3_ua_clear_for_request_sense(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!(sess))
+		return -1;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return -1;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count))) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -1;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list.  The First (and hence highest priority)
+	 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
+	 * matching struct se_lun.
+	 *
+	 * Once the returning ASC/ASCQ values are set, we go ahead and
+	 * release all of the Unit Attention conditions for the associated
+	 * struct se_lun.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+		" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+		cmd->orig_fe_lun, *asc, *ascq);
+
+	return (head) ? -1 : 0;
+}
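
The ASC/ASCQ pair these helpers hand back ends up in the sense data a fabric driver returns with CHECK CONDITION status. As a rough, hedged illustration (not code from this series; the helper name and fixed 18-byte buffer are assumptions), fixed-format sense data carrying one of these UA codes is laid out per SPC-4 roughly like this:

/* Sketch only: fixed-format sense data (SPC-4) carrying a UNIT
 * ATTENTION.  Buffer layout illustration, not part of this patch. */
static void fill_ua_sense(unsigned char *buf, u8 asc, u8 ascq)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;		/* current errors, fixed format */
	buf[2] = 0x06;		/* sense key: UNIT ATTENTION */
	buf[7] = 0x0a;		/* additional sense length */
	buf[12] = asc;		/* e.g. 0x29: reset condition */
	buf[13] = ascq;		/* e.g. 0x00: power on/reset occurred */
}
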
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 0000000..6e6b034
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,36 @@
+#ifndef TARGET_CORE_UA_H
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED	0x00
+#define ASCQ_29H_POWER_ON_OCCURRED				0x01
+#define ASCQ_29H_SCSI_BUS_RESET_OCCURED				0x02
+#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED		0x03
+#define ASCQ_29H_DEVICE_INTERNAL_RESET				0x04
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED	0x05
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD		0x06
+#define ASCQ_29H_NEXUS_LOSS_OCCURRED				0x07
+
+#define ASCQ_2AH_PARAMETERS_CHANGED				0x00
+#define ASCQ_2AH_MODE_PARAMETERS_CHANGED			0x01
+#define ASCQ_2AH_LOG_PARAMETERS_CHANGED				0x02
+#define ASCQ_2AH_RESERVATIONS_PREEMPTED				0x03
+#define ASCQ_2AH_RESERVATIONS_RELEASED				0x04
+#define ASCQ_2AH_REGISTRATIONS_PREEMPTED			0x05
+#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED		0x06
+#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_PRIORITY_CHANGED				0x08
+
+#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS		0x09
+
+extern struct kmem_cache *se_ua_cache;
+
+extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern void core_scsi3_ua_release_all(struct se_dev_entry *);
+extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
+						u8 *, u8 *);
+
+#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38244f5..ade0568 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -97,22 +97,26 @@
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+				unsigned seq)
+{
+	int left;
+	spin_lock_irq(&dev->work_lock);
+	left = seq - work->done_seq;
+	spin_unlock_irq(&dev->work_lock);
+	return left <= 0;
+}
+
 static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned seq;
-	int left;
 	int flushing;
 
 	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
 	spin_unlock_irq(&dev->work_lock);
-	wait_event(work->done, ({
-		   spin_lock_irq(&dev->work_lock);
-		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&dev->work_lock);
-		   left;
-	}));
+	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
 	spin_unlock_irq(&dev->work_lock);
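
The vhost change above is an instance of a general pattern: a wait_event() condition that takes locks or has side effects reads better (and is easier to keep correct) as a named helper, because the expression is re-evaluated on every wakeup. A minimal sketch of the same shape, with invented names and a hypothetical device struct:

/* Sketch only: the condition helper owns the locking, so the
 * wait_event() call site stays a single readable line. */
struct my_dev {
	spinlock_t lock;
	unsigned done_seq;
	wait_queue_head_t waitq;
};

static bool seq_reached(struct my_dev *dev, unsigned target)
{
	bool done;

	spin_lock_irq(&dev->lock);
	/* signed difference handles sequence-counter wraparound */
	done = (int)(target - dev->done_seq) <= 0;
	spin_unlock_irq(&dev->lock);
	return done;
}

	/* caller: */
	wait_event(dev->waitq, seq_reached(dev, seq));
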
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 0c99de0..b358d04 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -483,7 +483,7 @@
 				  info->screen_base, info->fix.smem_start);
 }
 
-static int __init ep93xxfb_probe(struct platform_device *pdev)
+static int __devinit ep93xxfb_probe(struct platform_device *pdev)
 {
 	struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
 	struct fb_info *info;
@@ -598,7 +598,7 @@
 	return err;
 }
 
-static int ep93xxfb_remove(struct platform_device *pdev)
+static int __devexit ep93xxfb_remove(struct platform_device *pdev)
 {
 	struct fb_info *info = platform_get_drvdata(pdev);
 	struct ep93xx_fbi *fbi = info->par;
@@ -622,7 +622,7 @@
 
 static struct platform_driver ep93xxfb_driver = {
 	.probe		= ep93xxfb_probe,
-	.remove		= ep93xxfb_remove,
+	.remove		= __devexit_p(ep93xxfb_remove),
 	.driver = {
 		.name	= "ep93xx-fb",
 		.owner	= THIS_MODULE,
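
For context, __devinit/__devexit (as the kernel defined them at the time) let hotplug-only probe/remove code be discarded in configurations where it can never run, and __devexit_p() degrades to NULL when the remove function has been compiled out. The usual skeleton, with invented names, looks like:

static int __devinit foo_probe(struct platform_device *pdev)
{
	/* ... acquire resources, register framebuffer, etc. ... */
	return 0;
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	/* ... release resources ... */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= __devexit_p(foo_remove),	/* NULL if __devexit code is discarded */
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};
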
diff --git a/fs/Kconfig b/fs/Kconfig
index 771f457..9a7921a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -30,15 +30,6 @@
 source "fs/reiserfs/Kconfig"
 source "fs/jfs/Kconfig"
 
-config FS_POSIX_ACL
-# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs/nfs4)
-#
-# NOTE: you can implement Posix ACLs without these helpers (XFS does).
-# 	Never use this symbol for ifdefs.
-#
-	bool
-	default n
-
 source "fs/xfs/Kconfig"
 source "fs/gfs2/Kconfig"
 source "fs/ocfs2/Kconfig"
@@ -47,6 +38,14 @@
 
 endif # BLOCK
 
+# Posix ACL utility routines
+#
+# Note: Posix ACLs can be implemented without these helpers.  Never use
+# this symbol for ifdefs in core code.
+#
+config FS_POSIX_ACL
+	def_bool n
+
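
With FS_POSIX_ACL moved out of the BLOCK-only section, any filesystem can pull the helpers in via select. A hypothetical filesystem's Kconfig fragment (names invented) would read:

config FOO_FS_POSIX_ACL
	bool "FooFS POSIX Access Control Lists"
	depends on FOO_FS
	select FS_POSIX_ACL
	help
	  Enable POSIX ACL support in FooFS.
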
 config EXPORTFS
 	tristate
 
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e6a4ab9..20c106f 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -66,6 +66,7 @@
 	.d_revalidate	= afs_d_revalidate,
 	.d_delete	= afs_d_delete,
 	.d_release	= afs_d_release,
+	.d_automount	= afs_d_automount,
 };
 
 #define AFS_DIR_HASHTBL_SIZE	128
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 0747339..db66c52 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -184,7 +184,8 @@
 	inode->i_generation	= 0;
 
 	set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
-	inode->i_flags |= S_NOATIME;
+	set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+	inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
 	unlock_new_inode(inode);
 	_leave(" = %p", inode);
 	return inode;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 58c633b..5a9b684 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -592,6 +592,7 @@
 extern const struct inode_operations afs_autocell_inode_operations;
 extern const struct file_operations afs_mntpt_file_operations;
 
+extern struct vfsmount *afs_d_automount(struct path *);
 extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
 extern void afs_mntpt_kill_timer(void);
 
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index e83c033..aa59184 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -24,7 +24,6 @@
 				       struct dentry *dentry,
 				       struct nameidata *nd);
 static int afs_mntpt_open(struct inode *inode, struct file *file);
-static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
 static void afs_mntpt_expiry_timed_out(struct work_struct *work);
 
 const struct file_operations afs_mntpt_file_operations = {
@@ -34,13 +33,11 @@
 
 const struct inode_operations afs_mntpt_inode_operations = {
 	.lookup		= afs_mntpt_lookup,
-	.follow_link	= afs_mntpt_follow_link,
 	.readlink	= page_readlink,
 	.getattr	= afs_getattr,
 };
 
 const struct inode_operations afs_autocell_inode_operations = {
-	.follow_link	= afs_mntpt_follow_link,
 	.getattr	= afs_getattr,
 };
 
@@ -88,6 +85,7 @@
 		_debug("symlink is a mountpoint");
 		spin_lock(&vnode->lock);
 		set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+		vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
 		spin_unlock(&vnode->lock);
 	}
 
@@ -238,52 +236,24 @@
 }
 
 /*
- * follow a link from a mountpoint directory, thus causing it to be mounted
+ * handle an automount point
  */
-static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
+struct vfsmount *afs_d_automount(struct path *path)
 {
 	struct vfsmount *newmnt;
-	int err;
 
-	_enter("%p{%s},{%s:%p{%s},}",
-	       dentry,
-	       dentry->d_name.name,
-	       nd->path.mnt->mnt_devname,
-	       dentry,
-	       nd->path.dentry->d_name.name);
+	_enter("{%s,%s}", path->mnt->mnt_devname, path->dentry->d_name.name);
 
-	dput(nd->path.dentry);
-	nd->path.dentry = dget(dentry);
+	newmnt = afs_mntpt_do_automount(path->dentry);
+	if (IS_ERR(newmnt))
+		return newmnt;
 
-	newmnt = afs_mntpt_do_automount(nd->path.dentry);
-	if (IS_ERR(newmnt)) {
-		path_put(&nd->path);
-		return (void *)newmnt;
-	}
-
-	mntget(newmnt);
-	err = do_add_mount(newmnt, &nd->path, MNT_SHRINKABLE, &afs_vfsmounts);
-	switch (err) {
-	case 0:
-		path_put(&nd->path);
-		nd->path.mnt = newmnt;
-		nd->path.dentry = dget(newmnt->mnt_root);
-		queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
-				   afs_mntpt_expiry_timeout * HZ);
-		break;
-	case -EBUSY:
-		/* someone else made a mount here whilst we were busy */
-		while (d_mountpoint(nd->path.dentry) &&
-		       follow_down(&nd->path))
-			;
-		err = 0;
-	default:
-		mntput(newmnt);
-		break;
-	}
-
-	_leave(" = %d", err);
-	return ERR_PTR(err);
+	mntget(newmnt); /* prevent immediate expiration */
+	mnt_set_expiry(newmnt, &afs_vfsmounts);
+	queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
+			   afs_mntpt_expiry_timeout * HZ);
+	_leave(" = %p {%s}", newmnt, newmnt->mnt_devname);
+	return newmnt;
 }
 
 /*
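
The AFS conversion above shows the new automount contract: the inode is flagged S_AUTOMOUNT, a ->d_automount() op builds and returns the vfsmount, and the VFS (rather than the filesystem) performs the attach. A minimal sketch of that contract, with invented names (foofs_build_mount() and foofs_auto_mounts are hypothetical):

static struct vfsmount *foofs_d_automount(struct path *path)
{
	struct vfsmount *mnt = foofs_build_mount(path->dentry);

	if (IS_ERR(mnt))
		return mnt;
	mntget(mnt);				/* pin against immediate expiry */
	mnt_set_expiry(mnt, &foofs_auto_mounts);
	return mnt;				/* the VFS does the actual attach */
}

static const struct dentry_operations foofs_dentry_ops = {
	.d_automount	= foofs_d_automount,
};
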
diff --git a/fs/aio.c b/fs/aio.c
index 5e00f15..fc557a3 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -87,7 +87,7 @@
 
 	aio_wq = create_workqueue("aio");
 	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
-	BUG_ON(!abe_pool);
+	BUG_ON(!aio_wq || !abe_pool);
 
 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index cbe57f3..c5567cb 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -233,7 +233,7 @@
 	return 0;
 
 err_mntput:
-	mntput_long(anon_inode_mnt);
+	mntput(anon_inode_mnt);
 err_unregister_filesystem:
 	unregister_filesystem(&anon_inode_fs_type);
 err_exit:
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 0fffe1c..1f016bf 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -99,7 +99,6 @@
 };
 
 #define AUTOFS_INF_EXPIRING	(1<<0) /* dentry is in the process of expiring */
-#define AUTOFS_INF_MOUNTPOINT	(1<<1) /* mountpoint status for direct expire */
 #define AUTOFS_INF_PENDING	(1<<2) /* dentry pending mount */
 
 struct autofs_wait_queue {
@@ -176,13 +175,6 @@
 	return 0;
 }
 
-static inline void autofs4_copy_atime(struct file *src, struct file *dst)
-{
-	dst->f_path.dentry->d_inode->i_atime =
-		src->f_path.dentry->d_inode->i_atime;
-	return;
-}
-
 struct inode *autofs4_get_inode(struct super_block *, struct autofs_info *);
 void autofs4_free_ino(struct autofs_info *);
 
@@ -212,11 +204,83 @@
 
 extern const struct inode_operations autofs4_symlink_inode_operations;
 extern const struct inode_operations autofs4_dir_inode_operations;
-extern const struct inode_operations autofs4_root_inode_operations;
-extern const struct inode_operations autofs4_indirect_root_inode_operations;
-extern const struct inode_operations autofs4_direct_root_inode_operations;
 extern const struct file_operations autofs4_dir_operations;
 extern const struct file_operations autofs4_root_operations;
+extern const struct dentry_operations autofs4_dentry_operations;
+
+/* VFS automount flags management functions */
+
+static inline void __managed_dentry_set_automount(struct dentry *dentry)
+{
+	dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
+}
+
+static inline void managed_dentry_set_automount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_set_automount(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_clear_automount(struct dentry *dentry)
+{
+	dentry->d_flags &= ~DCACHE_NEED_AUTOMOUNT;
+}
+
+static inline void managed_dentry_clear_automount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_clear_automount(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_set_transit(struct dentry *dentry)
+{
+	dentry->d_flags |= DCACHE_MANAGE_TRANSIT;
+}
+
+static inline void managed_dentry_set_transit(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_set_transit(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_clear_transit(struct dentry *dentry)
+{
+	dentry->d_flags &= ~DCACHE_MANAGE_TRANSIT;
+}
+
+static inline void managed_dentry_clear_transit(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_clear_transit(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_set_managed(struct dentry *dentry)
+{
+	dentry->d_flags |= (DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT);
+}
+
+static inline void managed_dentry_set_managed(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_set_managed(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_clear_managed(struct dentry *dentry)
+{
+	dentry->d_flags &= ~(DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT);
+}
+
+static inline void managed_dentry_clear_managed(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_clear_managed(dentry);
+	spin_unlock(&dentry->d_lock);
+}
 
 /* Initializing function */
 
@@ -229,19 +293,6 @@
 int autofs4_wait_release(struct autofs_sb_info *,autofs_wqt_t,int);
 void autofs4_catatonic_mode(struct autofs_sb_info *);
 
-static inline int autofs4_follow_mount(struct path *path)
-{
-	int res = 0;
-
-	while (d_mountpoint(path->dentry)) {
-		int followed = follow_down(path);
-		if (!followed)
-			break;
-		res = 1;
-	}
-	return res;
-}
-
 static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi)
 {
 	return new_encode_dev(sbi->sb->s_dev);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index eff9a41..1442da4 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -551,7 +551,7 @@
 
 		err = have_submounts(path.dentry);
 
-		if (follow_down(&path))
+		if (follow_down_one(&path))
 			magic = path.mnt->mnt_sb->s_magic;
 	}
 
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index cc1d013..3ed79d7 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -26,10 +26,6 @@
 	if (ino == NULL)
 		return 0;
 
-	/* No point expiring a pending mount */
-	if (ino->flags & AUTOFS_INF_PENDING)
-		return 0;
-
 	if (!do_now) {
 		/* Too young to die */
 		if (!timeout || time_after(ino->last_used + timeout, now))
@@ -56,7 +52,7 @@
 
 	path_get(&path);
 
-	if (!follow_down(&path))
+	if (!follow_down_one(&path))
 		goto done;
 
 	if (is_autofs4_dentry(path.dentry)) {
@@ -283,6 +279,7 @@
 	unsigned long timeout;
 	struct dentry *root = dget(sb->s_root);
 	int do_now = how & AUTOFS_EXP_IMMEDIATE;
+	struct autofs_info *ino;
 
 	if (!root)
 		return NULL;
@@ -291,19 +288,21 @@
 	timeout = sbi->exp_timeout;
 
 	spin_lock(&sbi->fs_lock);
+	ino = autofs4_dentry_ino(root);
+	/* No point expiring a pending mount */
+	if (ino->flags & AUTOFS_INF_PENDING) {
+		spin_unlock(&sbi->fs_lock);
+		return NULL;
+	}
+	managed_dentry_set_transit(root);
 	if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
 		struct autofs_info *ino = autofs4_dentry_ino(root);
-		if (d_mountpoint(root)) {
-			ino->flags |= AUTOFS_INF_MOUNTPOINT;
-			spin_lock(&root->d_lock);
-			root->d_flags &= ~DCACHE_MOUNTED;
-			spin_unlock(&root->d_lock);
-		}
 		ino->flags |= AUTOFS_INF_EXPIRING;
 		init_completion(&ino->expire_complete);
 		spin_unlock(&sbi->fs_lock);
 		return root;
 	}
+	managed_dentry_clear_transit(root);
 	spin_unlock(&sbi->fs_lock);
 	dput(root);
 
@@ -340,6 +339,10 @@
 	while ((dentry = get_next_positive_dentry(dentry, root))) {
 		spin_lock(&sbi->fs_lock);
 		ino = autofs4_dentry_ino(dentry);
+		/* No point expiring a pending mount */
+		if (ino->flags & AUTOFS_INF_PENDING)
+			goto cont;
+		managed_dentry_set_transit(dentry);
 
 		/*
 		 * Case 1: (i) indirect mount or top level pseudo direct mount
@@ -399,6 +402,8 @@
 			}
 		}
 next:
+		managed_dentry_clear_transit(dentry);
+cont:
 		spin_unlock(&sbi->fs_lock);
 	}
 	return NULL;
@@ -479,6 +484,8 @@
 	spin_lock(&sbi->fs_lock);
 	ino = autofs4_dentry_ino(dentry);
 	ino->flags &= ~AUTOFS_INF_EXPIRING;
+	if (!d_unhashed(dentry))
+		managed_dentry_clear_transit(dentry);
 	complete_all(&ino->expire_complete);
 	spin_unlock(&sbi->fs_lock);
 
@@ -504,18 +511,18 @@
 		ret = autofs4_wait(sbi, dentry, NFY_EXPIRE);
 
 		spin_lock(&sbi->fs_lock);
-		if (ino->flags & AUTOFS_INF_MOUNTPOINT) {
-			spin_lock(&sb->s_root->d_lock);
-			/*
-			 * If we haven't been expired away, then reset
-			 * mounted status.
-			 */
-			if (mnt->mnt_parent != mnt)
-				sb->s_root->d_flags |= DCACHE_MOUNTED;
-			spin_unlock(&sb->s_root->d_lock);
-			ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
-		}
 		ino->flags &= ~AUTOFS_INF_EXPIRING;
+		spin_lock(&dentry->d_lock);
+		if (ret)
+			__managed_dentry_clear_transit(dentry);
+		else {
+			if ((IS_ROOT(dentry) ||
+			    (autofs_type_indirect(sbi->type) &&
+			     IS_ROOT(dentry->d_parent))) &&
+			    !(dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+				__managed_dentry_set_automount(dentry);
+		}
+		spin_unlock(&dentry->d_lock);
 		complete_all(&ino->expire_complete);
 		spin_unlock(&sbi->fs_lock);
 		dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index a7bdb9d..9e1a9da 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -45,7 +45,6 @@
 
 	if (!reinit) {
 		ino->flags = 0;
-		ino->inode = NULL;
 		ino->dentry = NULL;
 		ino->size = 0;
 		INIT_LIST_HEAD(&ino->active);
@@ -76,19 +75,8 @@
 
 void autofs4_free_ino(struct autofs_info *ino)
 {
-	struct autofs_info *p_ino;
-
 	if (ino->dentry) {
 		ino->dentry->d_fsdata = NULL;
-		if (ino->dentry->d_inode) {
-			struct dentry *parent = ino->dentry->d_parent;
-			if (atomic_dec_and_test(&ino->count)) {
-				p_ino = autofs4_dentry_ino(parent);
-				if (p_ino && parent != ino->dentry)
-					atomic_dec(&p_ino->count);
-			}
-			dput(ino->dentry);
-		}
 		ino->dentry = NULL;
 	}
 	if (ino->free)
@@ -251,10 +239,6 @@
 	return ino;
 }
 
-static const struct dentry_operations autofs4_sb_dentry_operations = {
-	.d_release      = autofs4_dentry_release,
-};
-
 int autofs4_fill_super(struct super_block *s, void *data, int silent)
 {
 	struct inode * root_inode;
@@ -292,6 +276,7 @@
 	s->s_blocksize_bits = 10;
 	s->s_magic = AUTOFS_SUPER_MAGIC;
 	s->s_op = &autofs4_sops;
+	s->s_d_op = &autofs4_dentry_operations;
 	s->s_time_gran = 1;
 
 	/*
@@ -309,7 +294,6 @@
 		goto fail_iput;
 	pipe = NULL;
 
-	d_set_d_op(root, &autofs4_sb_dentry_operations);
 	root->d_fsdata = ino;
 
 	/* Can this call block? */
@@ -320,10 +304,11 @@
 		goto fail_dput;
 	}
 
+	if (autofs_type_trigger(sbi->type))
+		__managed_dentry_set_managed(root);
+
 	root_inode->i_fop = &autofs4_root_operations;
-	root_inode->i_op = autofs_type_trigger(sbi->type) ?
-			&autofs4_direct_root_inode_operations :
-			&autofs4_indirect_root_inode_operations;
+	root_inode->i_op = &autofs4_dir_inode_operations;
 
 	/* Couldn't this be tested earlier? */
 	if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
@@ -391,7 +376,6 @@
 	if (inode == NULL)
 		return NULL;
 
-	inf->inode = inode;
 	inode->i_mode = inf->mode;
 	if (sb->s_root) {
 		inode->i_uid = sb->s_root->d_inode->i_uid;
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 651e4ef..1dba035 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -35,10 +35,8 @@
 #endif
 static int autofs4_dir_open(struct inode *inode, struct file *file);
 static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
-static void *autofs4_follow_link(struct dentry *, struct nameidata *);
-
-#define TRIGGER_FLAGS   (LOOKUP_CONTINUE | LOOKUP_DIRECTORY)
-#define TRIGGER_INTENTS (LOOKUP_OPEN | LOOKUP_CREATE)
+static struct vfsmount *autofs4_d_automount(struct path *);
+static int autofs4_d_manage(struct dentry *, bool, bool);
 
 const struct file_operations autofs4_root_operations = {
 	.open		= dcache_dir_open,
@@ -60,22 +58,6 @@
 	.llseek		= dcache_dir_lseek,
 };
 
-const struct inode_operations autofs4_indirect_root_inode_operations = {
-	.lookup		= autofs4_lookup,
-	.unlink		= autofs4_dir_unlink,
-	.symlink	= autofs4_dir_symlink,
-	.mkdir		= autofs4_dir_mkdir,
-	.rmdir		= autofs4_dir_rmdir,
-};
-
-const struct inode_operations autofs4_direct_root_inode_operations = {
-	.lookup		= autofs4_lookup,
-	.unlink		= autofs4_dir_unlink,
-	.mkdir		= autofs4_dir_mkdir,
-	.rmdir		= autofs4_dir_rmdir,
-	.follow_link	= autofs4_follow_link,
-};
-
 const struct inode_operations autofs4_dir_inode_operations = {
 	.lookup		= autofs4_lookup,
 	.unlink		= autofs4_dir_unlink,
@@ -84,6 +66,12 @@
 	.rmdir		= autofs4_dir_rmdir,
 };
 
+const struct dentry_operations autofs4_dentry_operations = {
+	.d_automount	= autofs4_d_automount,
+	.d_manage	= autofs4_d_manage,
+	.d_release	= autofs4_dentry_release,
+};
+
 static void autofs4_add_active(struct dentry *dentry)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
@@ -116,14 +104,6 @@
 	return;
 }
 
-static unsigned int autofs4_need_mount(unsigned int flags)
-{
-	unsigned int res = 0;
-	if (flags & (TRIGGER_FLAGS | TRIGGER_INTENTS))
-		res = 1;
-	return res;
-}
-
 static int autofs4_dir_open(struct inode *inode, struct file *file)
 {
 	struct dentry *dentry = file->f_path.dentry;
@@ -158,239 +138,6 @@
 	return dcache_dir_open(inode, file);
 }
 
-static int try_to_fill_dentry(struct dentry *dentry, int flags)
-{
-	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
-	struct autofs_info *ino = autofs4_dentry_ino(dentry);
-	int status;
-
-	DPRINTK("dentry=%p %.*s ino=%p",
-		 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
-
-	/*
-	 * Wait for a pending mount, triggering one if there
-	 * isn't one already
-	 */
-	if (dentry->d_inode == NULL) {
-		DPRINTK("waiting for mount name=%.*s",
-			 dentry->d_name.len, dentry->d_name.name);
-
-		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
-
-		DPRINTK("mount done status=%d", status);
-
-		/* Turn this into a real negative dentry? */
-		if (status == -ENOENT) {
-			spin_lock(&sbi->fs_lock);
-			ino->flags &= ~AUTOFS_INF_PENDING;
-			spin_unlock(&sbi->fs_lock);
-			return status;
-		} else if (status) {
-			/* Return a negative dentry, but leave it "pending" */
-			return status;
-		}
-	/* Trigger mount for path component or follow link */
-	} else if (ino->flags & AUTOFS_INF_PENDING ||
-			autofs4_need_mount(flags)) {
-		DPRINTK("waiting for mount name=%.*s",
-			dentry->d_name.len, dentry->d_name.name);
-
-		spin_lock(&sbi->fs_lock);
-		ino->flags |= AUTOFS_INF_PENDING;
-		spin_unlock(&sbi->fs_lock);
-		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
-
-		DPRINTK("mount done status=%d", status);
-
-		if (status) {
-			spin_lock(&sbi->fs_lock);
-			ino->flags &= ~AUTOFS_INF_PENDING;
-			spin_unlock(&sbi->fs_lock);
-			return status;
-		}
-	}
-
-	/* Initialize expiry counter after successful mount */
-	ino->last_used = jiffies;
-
-	spin_lock(&sbi->fs_lock);
-	ino->flags &= ~AUTOFS_INF_PENDING;
-	spin_unlock(&sbi->fs_lock);
-
-	return 0;
-}
-
-/* For autofs direct mounts the follow link triggers the mount */
-static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
-	struct autofs_info *ino = autofs4_dentry_ino(dentry);
-	int oz_mode = autofs4_oz_mode(sbi);
-	unsigned int lookup_type;
-	int status;
-
-	DPRINTK("dentry=%p %.*s oz_mode=%d nd->flags=%d",
-		dentry, dentry->d_name.len, dentry->d_name.name, oz_mode,
-		nd->flags);
-	/*
-	 * For an expire of a covered direct or offset mount we need
-	 * to break out of follow_down() at the autofs mount trigger
-	 * (d_mounted--), so we can see the expiring flag, and manage
-	 * the blocking and following here until the expire is completed.
-	 */
-	if (oz_mode) {
-		spin_lock(&sbi->fs_lock);
-		if (ino->flags & AUTOFS_INF_EXPIRING) {
-			spin_unlock(&sbi->fs_lock);
-			/* Follow down to our covering mount. */
-			if (!follow_down(&nd->path))
-				goto done;
-			goto follow;
-		}
-		spin_unlock(&sbi->fs_lock);
-		goto done;
-	}
-
-	/* If an expire request is pending everyone must wait. */
-	autofs4_expire_wait(dentry);
-
-	/* We trigger a mount for almost all flags */
-	lookup_type = autofs4_need_mount(nd->flags);
-	spin_lock(&sbi->fs_lock);
-	spin_lock(&autofs4_lock);
-	spin_lock(&dentry->d_lock);
-	if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
-		spin_unlock(&dentry->d_lock);
-		spin_unlock(&autofs4_lock);
-		spin_unlock(&sbi->fs_lock);
-		goto follow;
-	}
-
-	/*
-	 * If the dentry contains directories then it is an autofs
-	 * multi-mount with no root mount offset. So don't try to
-	 * mount it again.
-	 */
-	if (ino->flags & AUTOFS_INF_PENDING ||
-	    (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
-		spin_unlock(&dentry->d_lock);
-		spin_unlock(&autofs4_lock);
-		spin_unlock(&sbi->fs_lock);
-
-		status = try_to_fill_dentry(dentry, nd->flags);
-		if (status)
-			goto out_error;
-
-		goto follow;
-	}
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&autofs4_lock);
-	spin_unlock(&sbi->fs_lock);
-follow:
-	/*
-	 * If there is no root mount it must be an autofs
-	 * multi-mount with no root offset so we don't need
-	 * to follow it.
-	 */
-	if (d_mountpoint(dentry)) {
-		if (!autofs4_follow_mount(&nd->path)) {
-			status = -ENOENT;
-			goto out_error;
-		}
-	}
-
-done:
-	return NULL;
-
-out_error:
-	path_put(&nd->path);
-	return ERR_PTR(status);
-}
-
-/*
- * Revalidate is called on every cache lookup.  Some of those
- * cache lookups may actually happen while the dentry is not
- * yet completely filled in, and revalidate has to delay such
- * lookups..
- */
-static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-	struct inode *dir;
-	struct autofs_sb_info *sbi;
-	int oz_mode;
-	int flags = nd ? nd->flags : 0;
-	int status = 1;
-
-	if (flags & LOOKUP_RCU)
-		return -ECHILD;
-
-	dir = dentry->d_parent->d_inode;
-	sbi = autofs4_sbi(dir->i_sb);
-	oz_mode = autofs4_oz_mode(sbi);
-
-	/* Pending dentry */
-	spin_lock(&sbi->fs_lock);
-	if (autofs4_ispending(dentry)) {
-		/* The daemon never causes a mount to trigger */
-		spin_unlock(&sbi->fs_lock);
-
-		if (oz_mode)
-			return 1;
-
-		/*
-		 * If the directory has gone away due to an expire
-		 * we have been called as ->d_revalidate() and so
-		 * we need to return false and proceed to ->lookup().
-		 */
-		if (autofs4_expire_wait(dentry) == -EAGAIN)
-			return 0;
-
-		/*
-		 * A zero status is success otherwise we have a
-		 * negative error code.
-		 */
-		status = try_to_fill_dentry(dentry, flags);
-		if (status == 0)
-			return 1;
-
-		return status;
-	}
-	spin_unlock(&sbi->fs_lock);
-
-	/* Negative dentry.. invalidate if "old" */
-	if (dentry->d_inode == NULL)
-		return 0;
-
-	/* Check for a non-mountpoint directory with no contents */
-	spin_lock(&autofs4_lock);
-	spin_lock(&dentry->d_lock);
-	if (S_ISDIR(dentry->d_inode->i_mode) &&
-	    !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
-		DPRINTK("dentry=%p %.*s, emptydir",
-			 dentry, dentry->d_name.len, dentry->d_name.name);
-		spin_unlock(&dentry->d_lock);
-		spin_unlock(&autofs4_lock);
-
-		/* The daemon never causes a mount to trigger */
-		if (oz_mode)
-			return 1;
-
-		/*
-		 * A zero status is success otherwise we have a
-		 * negative error code.
-		 */
-		status = try_to_fill_dentry(dentry, flags);
-		if (status == 0)
-			return 1;
-
-		return status;
-	}
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&autofs4_lock);
-
-	return 1;
-}
-
 void autofs4_dentry_release(struct dentry *de)
 {
 	struct autofs_info *inf;
@@ -398,11 +145,8 @@
 	DPRINTK("releasing %p", de);
 
 	inf = autofs4_dentry_ino(de);
-	de->d_fsdata = NULL;
-
 	if (inf) {
 		struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb);
-
 		if (sbi) {
 			spin_lock(&sbi->lookup_lock);
 			if (!list_empty(&inf->active))
@@ -411,26 +155,10 @@
 				list_del(&inf->expiring);
 			spin_unlock(&sbi->lookup_lock);
 		}
-
-		inf->dentry = NULL;
-		inf->inode = NULL;
-
 		autofs4_free_ino(inf);
 	}
 }
 
-/* For dentries of directories in the root dir */
-static const struct dentry_operations autofs4_root_dentry_operations = {
-	.d_revalidate	= autofs4_revalidate,
-	.d_release	= autofs4_dentry_release,
-};
-
-/* For other dentries */
-static const struct dentry_operations autofs4_dentry_operations = {
-	.d_revalidate	= autofs4_revalidate,
-	.d_release	= autofs4_dentry_release,
-};
-
 static struct dentry *autofs4_lookup_active(struct dentry *dentry)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
@@ -541,50 +269,244 @@
 	return NULL;
 }
 
+static int autofs4_mount_wait(struct dentry *dentry)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	int status;
+
+	if (ino->flags & AUTOFS_INF_PENDING) {
+		DPRINTK("waiting for mount name=%.*s",
+			dentry->d_name.len, dentry->d_name.name);
+		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+		DPRINTK("mount wait done status=%d", status);
+		ino->last_used = jiffies;
+		return status;
+	}
+	return 0;
+}
+
+static int do_expire_wait(struct dentry *dentry)
+{
+	struct dentry *expiring;
+
+	expiring = autofs4_lookup_expiring(dentry);
+	if (!expiring)
+		return autofs4_expire_wait(dentry);
+	else {
+		/*
+		 * If we are racing with expire the request might not
+		 * be quite complete, but the directory has been removed
+		 * so it must have been successful, just wait for it.
+		 */
+		autofs4_expire_wait(expiring);
+		autofs4_del_expiring(expiring);
+		dput(expiring);
+	}
+	return 0;
+}
+
+static struct dentry *autofs4_mountpoint_changed(struct path *path)
+{
+	struct dentry *dentry = path->dentry;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+
+	/*
+	 * If this is an indirect mount the dentry could have gone away
+	 * as a result of an expire and a new one created.
+	 */
+	if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
+		struct dentry *parent = dentry->d_parent;
+		struct dentry *new = d_lookup(parent, &dentry->d_name);
+		if (!new)
+			return NULL;
+		dput(path->dentry);
+		path->dentry = new;
+	}
+	return path->dentry;
+}
+
+static struct vfsmount *autofs4_d_automount(struct path *path)
+{
+	struct dentry *dentry = path->dentry;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	int status;
+
+	DPRINTK("dentry=%p %.*s",
+		dentry, dentry->d_name.len, dentry->d_name.name);
+
+	/*
+	 * Someone may have manually umounted this or it was a submount
+	 * that has gone away.
+	 */
+	spin_lock(&dentry->d_lock);
+	if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
+		if (!(dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
+		     (dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+			__managed_dentry_set_transit(path->dentry);
+	}
+	spin_unlock(&dentry->d_lock);
+
+	/* The daemon never triggers a mount. */
+	if (autofs4_oz_mode(sbi))
+		return NULL;
+
+	/*
+	 * If an expire request is pending everyone must wait.
+	 * If the expire fails we're still mounted so continue
+	 * the follow and return. A return of -EAGAIN (which only
+	 * happens with indirect mounts) means the expire completed
+	 * and the directory was removed, so just go ahead and try
+	 * the mount.
+	 */
+	status = do_expire_wait(dentry);
+	if (status && status != -EAGAIN)
+		return NULL;
+
+	/* Callback to the daemon to perform the mount or wait */
+	spin_lock(&sbi->fs_lock);
+	if (ino->flags & AUTOFS_INF_PENDING) {
+		spin_unlock(&sbi->fs_lock);
+		status = autofs4_mount_wait(dentry);
+		if (status)
+			return ERR_PTR(status);
+		spin_lock(&sbi->fs_lock);
+		goto done;
+	}
+
+	/*
+	 * If the dentry is a symlink it's equivalent to a directory
+	 * having d_mountpoint() true, so there's no need to call back
+	 * to the daemon.
+	 */
+	if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
+		goto done;
+	if (!d_mountpoint(dentry)) {
+		/*
+		 * It's possible that user space hasn't removed directories
+		 * after umounting a rootless multi-mount, although it
+		 * should. For v5 have_submounts() is sufficient to handle
+		 * this because the leaves of the directory tree under the
+		 * mount never trigger mounts themselves (they have an autofs
+		 * trigger mount mounted on them). But v4 pseudo direct mounts
+		 * do need the leaves to trigger mounts. In this case we
+		 * have no choice but to use the list_empty() check and
+		 * require user space to behave.
+		 */
+		if (sbi->version > 4) {
+			if (have_submounts(dentry))
+				goto done;
+		} else {
+			spin_lock(&dentry->d_lock);
+			if (!list_empty(&dentry->d_subdirs)) {
+				spin_unlock(&dentry->d_lock);
+				goto done;
+			}
+			spin_unlock(&dentry->d_lock);
+		}
+		ino->flags |= AUTOFS_INF_PENDING;
+		spin_unlock(&sbi->fs_lock);
+		status = autofs4_mount_wait(dentry);
+		if (status)
+			return ERR_PTR(status);
+		spin_lock(&sbi->fs_lock);
+		ino->flags &= ~AUTOFS_INF_PENDING;
+	}
+done:
+	if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+		/*
+		 * Any needed mounting has been completed and the path updated
+		 * so turn this into a normal dentry so we don't continually
+		 * call ->d_automount() and ->d_manage().
+		 */
+		spin_lock(&dentry->d_lock);
+		__managed_dentry_clear_transit(dentry);
+		/*
+		 * Only clear DMANAGED_AUTOMOUNT for rootless multi-mounts and
+		 * symlinks as in all other cases the dentry will be covered by
+		 * an actual mount so ->d_automount() won't be called during
+		 * the follow.
+		 */
+		if ((!d_mountpoint(dentry) &&
+		    !list_empty(&dentry->d_subdirs)) ||
+		    (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+			__managed_dentry_clear_automount(dentry);
+		spin_unlock(&dentry->d_lock);
+	}
+	spin_unlock(&sbi->fs_lock);
+
+	/* Mount succeeded, check if we ended up with a new dentry */
+	dentry = autofs4_mountpoint_changed(path);
+	if (!dentry)
+		return ERR_PTR(-ENOENT);
+
+	return NULL;
+}
+
+int autofs4_d_manage(struct dentry *dentry, bool mounting_here, bool rcu_walk)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+
+	DPRINTK("dentry=%p %.*s",
+		dentry, dentry->d_name.len, dentry->d_name.name);
+
+	/* The daemon never waits. */
+	if (autofs4_oz_mode(sbi) || mounting_here) {
+		if (!d_mountpoint(dentry))
+			return -EISDIR;
+		return 0;
+	}
+
+	/* We need to sleep, so we need pathwalk to be in ref-mode */
+	if (rcu_walk)
+		return -ECHILD;
+
+	/* Wait for pending expires */
+	do_expire_wait(dentry);
+
+	/*
+	 * This dentry may be under construction so wait on mount
+	 * completion.
+	 */
+	return autofs4_mount_wait(dentry);
+}
+
 /* Lookups in the root directory */
 static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
 	struct autofs_sb_info *sbi;
 	struct autofs_info *ino;
-	struct dentry *expiring, *active;
-	int oz_mode;
+	struct dentry *active;
 
-	DPRINTK("name = %.*s",
-		dentry->d_name.len, dentry->d_name.name);
+	DPRINTK("name = %.*s", dentry->d_name.len, dentry->d_name.name);
 
 	/* File name too long to exist */
 	if (dentry->d_name.len > NAME_MAX)
 		return ERR_PTR(-ENAMETOOLONG);
 
 	sbi = autofs4_sbi(dir->i_sb);
-	oz_mode = autofs4_oz_mode(sbi);
 
 	DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
-		 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
+		current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
 
 	active = autofs4_lookup_active(dentry);
 	if (active) {
-		dentry = active;
-		ino = autofs4_dentry_ino(dentry);
+		return active;
 	} else {
 		/*
-		 * Mark the dentry incomplete but don't hash it. We do this
-		 * to serialize our inode creation operations (symlink and
-		 * mkdir) which prevents deadlock during the callback to
-		 * the daemon. Subsequent user space lookups for the same
-		 * dentry are placed on the wait queue while the daemon
-		 * itself is allowed passage unresticted so the create
-		 * operation itself can then hash the dentry. Finally,
-		 * we check for the hashed dentry and return the newly
-		 * hashed dentry.
+		 * A dentry that is not within the root can never trigger a
+		 * mount operation, unless the directory already exists, so we
+		 * can fail immediately.  The daemon, however, does need
+		 * to create directories within the file system.
 		 */
-		d_set_d_op(dentry, &autofs4_root_dentry_operations);
+		if (!autofs4_oz_mode(sbi) && !IS_ROOT(dentry->d_parent))
+			return ERR_PTR(-ENOENT);
 
-		/*
-		 * And we need to ensure that the same dentry is used for
-		 * all following lookup calls until it is hashed so that
-		 * the dentry flags are persistent throughout the request.
-		 */
+		/* Mark entries in the root as mount triggers */
+		if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent))
+			__managed_dentry_set_managed(dentry);
+
 		ino = autofs4_init_ino(NULL, sbi, 0555);
 		if (!ino)
 			return ERR_PTR(-ENOMEM);
@@ -596,82 +518,6 @@
 
 		d_instantiate(dentry, NULL);
 	}
-
-	if (!oz_mode) {
-		mutex_unlock(&dir->i_mutex);
-		expiring = autofs4_lookup_expiring(dentry);
-		if (expiring) {
-			/*
-			 * If we are racing with expire the request might not
-			 * be quite complete but the directory has been removed
-			 * so it must have been successful, so just wait for it.
-			 */
-			autofs4_expire_wait(expiring);
-			autofs4_del_expiring(expiring);
-			dput(expiring);
-		}
-
-		spin_lock(&sbi->fs_lock);
-		ino->flags |= AUTOFS_INF_PENDING;
-		spin_unlock(&sbi->fs_lock);
-		if (dentry->d_op && dentry->d_op->d_revalidate)
-			(dentry->d_op->d_revalidate)(dentry, nd);
-		mutex_lock(&dir->i_mutex);
-	}
-
-	/*
-	 * If we are still pending, check if we had to handle
-	 * a signal. If so we can force a restart..
-	 */
-	if (ino->flags & AUTOFS_INF_PENDING) {
-		/* See if we were interrupted */
-		if (signal_pending(current)) {
-			sigset_t *sigset = &current->pending.signal;
-			if (sigismember (sigset, SIGKILL) ||
-			    sigismember (sigset, SIGQUIT) ||
-			    sigismember (sigset, SIGINT)) {
-			    if (active)
-				dput(active);
-			    return ERR_PTR(-ERESTARTNOINTR);
-			}
-		}
-		if (!oz_mode) {
-			spin_lock(&sbi->fs_lock);
-			ino->flags &= ~AUTOFS_INF_PENDING;
-			spin_unlock(&sbi->fs_lock);
-		}
-	}
-
-	/*
-	 * If this dentry is unhashed, then we shouldn't honour this
-	 * lookup.  Returning ENOENT here doesn't do the right thing
-	 * for all system calls, but it should be OK for the operations
-	 * we permit from an autofs.
-	 */
-	if (!oz_mode && d_unhashed(dentry)) {
-		/*
-		 * A user space application can (and has done in the past)
-		 * remove and re-create this directory during the callback.
-		 * This can leave us with an unhashed dentry, but a
-		 * successful mount!  So we need to perform another
-		 * cached lookup in case the dentry now exists.
-		 */
-		struct dentry *parent = dentry->d_parent;
-		struct dentry *new = d_lookup(parent, &dentry->d_name);
-		if (new != NULL)
-			dentry = new;
-		else
-			dentry = ERR_PTR(-ENOENT);
-
-		if (active)
-			dput(active);
-
-		return dentry;
-	}
-
-	if (active)
-		return active;
-
 	return NULL;
 }
 
@@ -716,18 +562,12 @@
 	}
 	d_add(dentry, inode);
 
-	if (dir == dir->i_sb->s_root->d_inode)
-		d_set_d_op(dentry, &autofs4_root_dentry_operations);
-	else
-		d_set_d_op(dentry, &autofs4_dentry_operations);
-
 	dentry->d_fsdata = ino;
 	ino->dentry = dget(dentry);
 	atomic_inc(&ino->count);
 	p_ino = autofs4_dentry_ino(dentry->d_parent);
 	if (p_ino && dentry->d_parent != dentry)
 		atomic_inc(&p_ino->count);
-	ino->inode = inode;
 
 	ino->u.symlink = cp;
 	dir->i_mtime = CURRENT_TIME;
@@ -782,6 +622,58 @@
 	return 0;
 }
 
+/*
+ * Version 4 of autofs provides a pseudo direct mount implementation
+ * that relies on directories at the leaves of a directory tree under
+ * an indirect mount to trigger mounts. To allow for this we need to
+ * set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves
+ * of the directory tree. There is no need to clear the automount flag
+ * following a mount or restore it after an expire because these mounts
+ * are always covered. However, it is necessary to ensure that these
+ * flags are clear on non-empty directories to avoid unnecessary calls
+ * during path walks.
+ */
+static void autofs_set_leaf_automount_flags(struct dentry *dentry)
+{
+	struct dentry *parent;
+
+	/* root and dentries in the root are already handled */
+	if (IS_ROOT(dentry->d_parent))
+		return;
+
+	managed_dentry_set_managed(dentry);
+
+	parent = dentry->d_parent;
+	/* only consider parents below dentries in the root */
+	if (IS_ROOT(parent->d_parent))
+		return;
+	managed_dentry_clear_managed(parent);
+	return;
+}
+
+static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
+{
+	struct list_head *d_child;
+	struct dentry *parent;
+
+	/* flags for dentries in the root are handled elsewhere */
+	if (IS_ROOT(dentry->d_parent))
+		return;
+
+	managed_dentry_clear_managed(dentry);
+
+	parent = dentry->d_parent;
+	/* only consider parents below dentries in the root */
+	if (IS_ROOT(parent->d_parent))
+		return;
+	d_child = &dentry->d_u.d_child;
+	/* Set parent managed if it's becoming empty */
+	if (d_child->next == &parent->d_subdirs &&
+	    d_child->prev == &parent->d_subdirs)
+		managed_dentry_set_managed(parent);
+	return;
+}
+
 static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
@@ -809,6 +701,9 @@
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&autofs4_lock);
 
+	if (sbi->version < 5)
+		autofs_clear_leaf_automount_flags(dentry);
+
 	if (atomic_dec_and_test(&ino->count)) {
 		p_ino = autofs4_dentry_ino(dentry->d_parent);
 		if (p_ino && dentry->d_parent != dentry)
@@ -851,10 +746,8 @@
 	}
 	d_add(dentry, inode);
 
-	if (dir == dir->i_sb->s_root->d_inode)
-		d_set_d_op(dentry, &autofs4_root_dentry_operations);
-	else
-		d_set_d_op(dentry, &autofs4_dentry_operations);
+	if (sbi->version < 5)
+		autofs_set_leaf_automount_flags(dentry);
 
 	dentry->d_fsdata = ino;
 	ino->dentry = dget(dentry);
@@ -862,7 +755,6 @@
 	p_ino = autofs4_dentry_ino(dentry->d_parent);
 	if (p_ino && dentry->d_parent != dentry)
 		atomic_inc(&p_ino->count);
-	ino->inode = inode;
 	inc_nlink(dir);
 	dir->i_mtime = CURRENT_TIME;
 
@@ -944,8 +836,7 @@
 int is_autofs4_dentry(struct dentry *dentry)
 {
 	return dentry && dentry->d_inode &&
-		(dentry->d_op == &autofs4_root_dentry_operations ||
-		 dentry->d_op == &autofs4_dentry_operations) &&
+		dentry->d_op == &autofs4_dentry_operations &&
 		dentry->d_fsdata != NULL;
 }
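
Taken together, autofs4_d_automount() and autofs4_d_manage() split the old follow_link() trigger into a "mount it" hook and a "may I pass?" hook. A stripped-down ->d_manage() following the same conventions (hypothetical names; the wait helper is invented) might read:

static int foofs_d_manage(struct dentry *dentry, bool mounting_here,
			  bool rcu_walk)
{
	if (mounting_here)
		return 0;		/* our daemon is mounting here: wave it through */
	if (rcu_walk)
		return -ECHILD;		/* we may sleep: drop back to ref-walk */
	foofs_wait_for_transit(dentry);	/* hypothetical: block while in transition */
	return 0;
}
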
 
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index c5f8459..5601005 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -309,6 +309,9 @@
 	 * completed while we waited on the mutex ...
 	 */
 	if (notify == NFY_MOUNT) {
+		struct dentry *new = NULL;
+		int valid = 1;
+
 		/*
 		 * If the dentry was successfully mounted while we slept
 		 * on the wait queue mutex we can return success. If it
@@ -316,8 +319,20 @@
 		 * a multi-mount with no mount at its base) we can
 		 * continue on and create a new request.
 		 */
+		if (!IS_ROOT(dentry)) {
+			if (dentry->d_inode && d_unhashed(dentry)) {
+				struct dentry *parent = dentry->d_parent;
+				new = d_lookup(parent, &dentry->d_name);
+				if (new)
+					dentry = new;
+			}
+		}
 		if (have_submounts(dentry))
-			return 0;
+			valid = 0;
+
+		if (new)
+			dput(new);
+		return valid;
 	}
 
 	return 1;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index fe3f59c1..333a7bb 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -432,6 +432,9 @@
 	mutex_init(&bdev->bd_mutex);
 	INIT_LIST_HEAD(&bdev->bd_inodes);
 	INIT_LIST_HEAD(&bdev->bd_list);
+#ifdef CONFIG_SYSFS
+	INIT_LIST_HEAD(&bdev->bd_holder_disks);
+#endif
 	inode_init_once(&ei->vfs_inode);
 	/* Initialize mutex for freeze. */
 	mutex_init(&bdev->bd_fsfreeze_mutex);
@@ -779,6 +782,23 @@
 }
 
 #ifdef CONFIG_SYSFS
+struct bd_holder_disk {
+	struct list_head	list;
+	struct gendisk		*disk;
+	int			refcnt;
+};
+
+static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
+						  struct gendisk *disk)
+{
+	struct bd_holder_disk *holder;
+
+	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
+		if (holder->disk == disk)
+			return holder;
+	return NULL;
+}
+
 static int add_symlink(struct kobject *from, struct kobject *to)
 {
 	return sysfs_create_link(from, to, kobject_name(to));
@@ -794,6 +814,8 @@
  * @bdev: the claimed slave bdev
  * @disk: the holding disk
  *
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
+ *
 * This function creates the following sysfs symlinks.
  *
  * - from "slaves" directory of the holder @disk to the claimed @bdev
@@ -817,47 +839,83 @@
  */
 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 {
+	struct bd_holder_disk *holder;
 	int ret = 0;
 
 	mutex_lock(&bdev->bd_mutex);
 
-	WARN_ON_ONCE(!bdev->bd_holder || bdev->bd_holder_disk);
+	WARN_ON_ONCE(!bdev->bd_holder);
 
 	/* FIXME: remove the following once add_disk() handles errors */
 	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
 		goto out_unlock;
 
-	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
-	if (ret)
-		goto out_unlock;
-
-	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
-	if (ret) {
-		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+	holder = bd_find_holder_disk(bdev, disk);
+	if (holder) {
+		holder->refcnt++;
 		goto out_unlock;
 	}
 
-	bdev->bd_holder_disk = disk;
+	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
+	if (!holder) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	INIT_LIST_HEAD(&holder->list);
+	holder->disk = disk;
+	holder->refcnt = 1;
+
+	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+	if (ret)
+		goto out_free;
+
+	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
+	if (ret)
+		goto out_del;
+
+	list_add(&holder->list, &bdev->bd_holder_disks);
+	goto out_unlock;
+
+out_del:
+	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+out_free:
+	kfree(holder);
 out_unlock:
 	mutex_unlock(&bdev->bd_mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(bd_link_disk_holder);
 
-static void bd_unlink_disk_holder(struct block_device *bdev)
+/**
+ * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
+ *
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
 {
-	struct gendisk *disk = bdev->bd_holder_disk;
+	struct bd_holder_disk *holder;
 
-	bdev->bd_holder_disk = NULL;
-	if (!disk)
-		return;
+	mutex_lock(&bdev->bd_mutex);
 
-	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
-	del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
+	holder = bd_find_holder_disk(bdev, disk);
+
+	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
+		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+		del_symlink(bdev->bd_part->holder_dir,
+			    &disk_to_dev(disk)->kobj);
+		list_del_init(&holder->list);
+		kfree(holder);
+	}
+
+	mutex_unlock(&bdev->bd_mutex);
 }
-#else
-static inline void bd_unlink_disk_holder(struct block_device *bdev)
-{ }
+EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
 #endif
 
 /**
@@ -1380,7 +1438,6 @@
 		 * unblock evpoll if it was a write holder.
 		 */
 		if (bdev_free) {
-			bd_unlink_disk_holder(bdev);
 			if (bdev->bd_write_holder) {
 				disk_unblock_events(bdev->bd_disk);
 				bdev->bd_write_holder = false;
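
The refcounted bd_holder_disk list lets a stacking driver (md/dm style) make the link/unlink calls symmetrically per claim instead of tracking a single holder pointer. Usage is simply paired calls; a sketch with invented names:

	/* while assembling: bdev is a member device, disk is the array */
	err = bd_link_disk_holder(member_bdev, array_disk);
	if (err)
		goto fail;
	/* ... */
	/* on teardown: drops the refcount, removes the symlinks at zero */
	bd_unlink_disk_holder(member_bdev, array_disk);
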
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index a142d20..b875d44 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -27,6 +27,7 @@
 #include <linux/backing-dev.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/kobject.h>
 #include <asm/kmap_types.h>
 #include "extent_io.h"
 #include "extent_map.h"
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 66836d8..a9e0a4ea 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
+#include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/statfs.h>
@@ -1237,6 +1238,117 @@
 	return 0;
 }
 
+static long btrfs_fallocate(struct file *file, int mode,
+			    loff_t offset, loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct extent_state *cached_state = NULL;
+	u64 cur_offset;
+	u64 last_byte;
+	u64 alloc_start;
+	u64 alloc_end;
+	u64 alloc_hint = 0;
+	u64 locked_end;
+	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
+	struct extent_map *em;
+	int ret;
+
+	alloc_start = offset & ~mask;
+	alloc_end =  (offset + len + mask) & ~mask;
+
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode & ~FALLOC_FL_KEEP_SIZE)
+		return -EOPNOTSUPP;
+
+	/*
+	 * wait for ordered IO before we have any locks.  We'll loop again
+	 * below with the locks held.
+	 */
+	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+
+	mutex_lock(&inode->i_mutex);
+	ret = inode_newsize_ok(inode, alloc_end);
+	if (ret)
+		goto out;
+
+	if (alloc_start > inode->i_size) {
+		ret = btrfs_cont_expand(inode, alloc_start);
+		if (ret)
+			goto out;
+	}
+
+	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
+	if (ret)
+		goto out;
+
+	locked_end = alloc_end - 1;
+	while (1) {
+		struct btrfs_ordered_extent *ordered;
+
+		/* the extent lock is ordered inside the running
+		 * transaction
+		 */
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
+				 locked_end, 0, &cached_state, GFP_NOFS);
+		ordered = btrfs_lookup_first_ordered_extent(inode,
+							    alloc_end - 1);
+		if (ordered &&
+		    ordered->file_offset + ordered->len > alloc_start &&
+		    ordered->file_offset < alloc_end) {
+			btrfs_put_ordered_extent(ordered);
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+					     alloc_start, locked_end,
+					     &cached_state, GFP_NOFS);
+			/*
+			 * we can't wait on the range with the transaction
+			 * running or with the extent lock held
+			 */
+			btrfs_wait_ordered_range(inode, alloc_start,
+						 alloc_end - alloc_start);
+		} else {
+			if (ordered)
+				btrfs_put_ordered_extent(ordered);
+			break;
+		}
+	}
+
+	cur_offset = alloc_start;
+	while (1) {
+		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+				      alloc_end - cur_offset, 0);
+		BUG_ON(IS_ERR(em) || !em);
+		last_byte = min(extent_map_end(em), alloc_end);
+		last_byte = (last_byte + mask) & ~mask;
+		if (em->block_start == EXTENT_MAP_HOLE ||
+		    (cur_offset >= inode->i_size &&
+		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
+							last_byte - cur_offset,
+							1 << inode->i_blkbits,
+							offset + len,
+							&alloc_hint);
+			if (ret < 0) {
+				free_extent_map(em);
+				break;
+			}
+		}
+		free_extent_map(em);
+
+		cur_offset = last_byte;
+		if (cur_offset >= alloc_end) {
+			ret = 0;
+			break;
+		}
+	}
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+			     &cached_state, GFP_NOFS);
+
+	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
+out:
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
+
 const struct file_operations btrfs_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
@@ -1248,6 +1360,7 @@
 	.open		= generic_file_open,
 	.release	= btrfs_release_file,
 	.fsync		= btrfs_sync_file,
+	.fallocate	= btrfs_fallocate,
 	.unlocked_ioctl	= btrfs_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= btrfs_ioctl,
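
With ->fallocate in file_operations, btrfs picks the request up directly from the fallocate(2) syscall path. A small self-contained user-space check (filename invented) that preallocates 1 MiB without changing the visible file size:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* KEEP_SIZE: allocate blocks but leave i_size alone */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
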
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a3798a3..902afbf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7098,116 +7098,6 @@
 					   min_size, actual_len, alloc_hint, trans);
 }
 
-static long btrfs_fallocate(struct inode *inode, int mode,
-			    loff_t offset, loff_t len)
-{
-	struct extent_state *cached_state = NULL;
-	u64 cur_offset;
-	u64 last_byte;
-	u64 alloc_start;
-	u64 alloc_end;
-	u64 alloc_hint = 0;
-	u64 locked_end;
-	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
-	struct extent_map *em;
-	int ret;
-
-	alloc_start = offset & ~mask;
-	alloc_end =  (offset + len + mask) & ~mask;
-
-	/* We only support the FALLOC_FL_KEEP_SIZE mode */
-	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
-		return -EOPNOTSUPP;
-
-	/*
-	 * wait for ordered IO before we have any locks.  We'll loop again
-	 * below with the locks held.
-	 */
-	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
-
-	mutex_lock(&inode->i_mutex);
-	ret = inode_newsize_ok(inode, alloc_end);
-	if (ret)
-		goto out;
-
-	if (alloc_start > inode->i_size) {
-		ret = btrfs_cont_expand(inode, alloc_start);
-		if (ret)
-			goto out;
-	}
-
-	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
-	if (ret)
-		goto out;
-
-	locked_end = alloc_end - 1;
-	while (1) {
-		struct btrfs_ordered_extent *ordered;
-
-		/* the extent lock is ordered inside the running
-		 * transaction
-		 */
-		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
-				 locked_end, 0, &cached_state, GFP_NOFS);
-		ordered = btrfs_lookup_first_ordered_extent(inode,
-							    alloc_end - 1);
-		if (ordered &&
-		    ordered->file_offset + ordered->len > alloc_start &&
-		    ordered->file_offset < alloc_end) {
-			btrfs_put_ordered_extent(ordered);
-			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     alloc_start, locked_end,
-					     &cached_state, GFP_NOFS);
-			/*
-			 * we can't wait on the range with the transaction
-			 * running or with the extent lock held
-			 */
-			btrfs_wait_ordered_range(inode, alloc_start,
-						 alloc_end - alloc_start);
-		} else {
-			if (ordered)
-				btrfs_put_ordered_extent(ordered);
-			break;
-		}
-	}
-
-	cur_offset = alloc_start;
-	while (1) {
-		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
-				      alloc_end - cur_offset, 0);
-		BUG_ON(IS_ERR(em) || !em);
-		last_byte = min(extent_map_end(em), alloc_end);
-		last_byte = (last_byte + mask) & ~mask;
-		if (em->block_start == EXTENT_MAP_HOLE ||
-		    (cur_offset >= inode->i_size &&
-		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
-							last_byte - cur_offset,
-							1 << inode->i_blkbits,
-							offset + len,
-							&alloc_hint);
-			if (ret < 0) {
-				free_extent_map(em);
-				break;
-			}
-		}
-		free_extent_map(em);
-
-		cur_offset = last_byte;
-		if (cur_offset >= alloc_end) {
-			ret = 0;
-			break;
-		}
-	}
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-			     &cached_state, GFP_NOFS);
-
-	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
-out:
-	mutex_unlock(&inode->i_mutex);
-	return ret;
-}
-
 static int btrfs_set_page_dirty(struct page *page)
 {
 	return __set_page_dirty_nobuffers(page);
@@ -7310,7 +7200,6 @@
 	.listxattr      = btrfs_listxattr,
 	.removexattr	= btrfs_removexattr,
 	.permission	= btrfs_permission,
-	.fallocate	= btrfs_fallocate,
 	.fiemap		= btrfs_fiemap,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index c68a056..7ed3653 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -255,35 +255,6 @@
 
 }
 
-static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
-				struct list_head *mntlist)
-{
-	/* stolen from afs code */
-	int err;
-
-	mntget(newmnt);
-	err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags | MNT_SHRINKABLE, mntlist);
-	switch (err) {
-	case 0:
-		path_put(&nd->path);
-		nd->path.mnt = newmnt;
-		nd->path.dentry = dget(newmnt->mnt_root);
-		schedule_delayed_work(&cifs_dfs_automount_task,
-				      cifs_dfs_mountpoint_expiry_timeout);
-		break;
-	case -EBUSY:
-		/* someone else made a mount here whilst we were busy */
-		while (d_mountpoint(nd->path.dentry) &&
-		       follow_down(&nd->path))
-			;
-		err = 0;
-	default:
-		mntput(newmnt);
-		break;
-	}
-	return err;
-}
-
 static void dump_referral(const struct dfs_info3_param *ref)
 {
 	cFYI(1, "DFS: ref path: %s", ref->path_name);
@@ -293,45 +264,43 @@
 				ref->path_consumed);
 }
 
-
-static void*
-cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+/*
+ * Create a vfsmount that we can automount
+ */
+static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
 {
 	struct dfs_info3_param *referrals = NULL;
 	unsigned int num_referrals = 0;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsSesInfo *ses;
-	char *full_path = NULL;
+	char *full_path;
 	int xid, i;
-	int rc = 0;
-	struct vfsmount *mnt = ERR_PTR(-ENOENT);
+	int rc;
+	struct vfsmount *mnt;
 	struct tcon_link *tlink;
 
 	cFYI(1, "in %s", __func__);
-	BUG_ON(IS_ROOT(dentry));
+	BUG_ON(IS_ROOT(mntpt));
 
 	xid = GetXid();
 
-	dput(nd->path.dentry);
-	nd->path.dentry = dget(dentry);
-
 	/*
 	 * The MSDFS spec states that paths in DFS referral requests and
 	 * responses must be prefixed by a single '\' character instead of
 	 * the double backslashes usually used in the UNC. This function
 	 * gives us the latter, so we must adjust the result.
 	 */
-	full_path = build_path_from_dentry(dentry);
-	if (full_path == NULL) {
-		rc = -ENOMEM;
-		goto out_err;
-	}
+	mnt = ERR_PTR(-ENOMEM);
+	full_path = build_path_from_dentry(mntpt);
+	if (full_path == NULL)
+		goto free_xid;
 
-	cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
+	cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
 	tlink = cifs_sb_tlink(cifs_sb);
+	mnt = ERR_PTR(-EINVAL);
 	if (IS_ERR(tlink)) {
-		rc = PTR_ERR(tlink);
-		goto out_err;
+		mnt = ERR_CAST(tlink);
+		goto free_full_path;
 	}
 	ses = tlink_tcon(tlink)->ses;
 
@@ -341,46 +310,63 @@
 
 	cifs_put_tlink(tlink);
 
+	mnt = ERR_PTR(-ENOENT);
 	for (i = 0; i < num_referrals; i++) {
 		int len;
-		dump_referral(referrals+i);
+		dump_referral(referrals + i);
 		/* connect to a node */
 		len = strlen(referrals[i].node_name);
 		if (len < 2) {
 			cERROR(1, "%s: Net Address path too short: %s",
 					__func__, referrals[i].node_name);
-			rc = -EINVAL;
-			goto out_err;
+			mnt = ERR_PTR(-EINVAL);
+			break;
 		}
 		mnt = cifs_dfs_do_refmount(cifs_sb,
 				full_path, referrals + i);
 		cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
 					referrals[i].node_name, mnt);
-
-		/* complete mount procedure if we accured submount */
 		if (!IS_ERR(mnt))
-			break;
+			goto success;
 	}
 
-	/* we need it cause for() above could exit without valid submount */
-	rc = PTR_ERR(mnt);
-	if (IS_ERR(mnt))
-		goto out_err;
+	/* no valid submounts were found; return error from get_dfs_path() by
+	 * preference */
+	if (rc != 0)
+		mnt = ERR_PTR(rc);
 
-	rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list);
-
-out:
-	FreeXid(xid);
+success:
 	free_dfs_info_array(referrals, num_referrals);
+free_full_path:
 	kfree(full_path);
+free_xid:
+	FreeXid(xid);
 	cFYI(1, "leaving %s" , __func__);
-	return ERR_PTR(rc);
-out_err:
-	path_put(&nd->path);
-	goto out;
+	return mnt;
+}
+
+/*
+ * Attempt to automount the referral
+ */
+struct vfsmount *cifs_dfs_d_automount(struct path *path)
+{
+	struct vfsmount *newmnt;
+
+	cFYI(1, "in %s", __func__);
+
+	newmnt = cifs_dfs_do_automount(path->dentry);
+	if (IS_ERR(newmnt)) {
+		cFYI(1, "leaving %s [automount failed]", __func__);
+		return newmnt;
+	}
+
+	mntget(newmnt); /* prevent immediate expiration */
+	mnt_set_expiry(newmnt, &cifs_dfs_automount_list);
+	schedule_delayed_work(&cifs_dfs_automount_task,
+			      cifs_dfs_mountpoint_expiry_timeout);
+	cFYI(1, "leaving %s [ok]", __func__);
+	return newmnt;
 }
 
 const struct inode_operations cifs_dfs_referral_inode_operations = {
-	.follow_link = cifs_dfs_follow_mountpoint,
 };
-
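
With this conversion the DFS referral dentry no longer abuses ->follow_link: the VFS calls the new ->d_automount op when a path walk reaches a dentry flagged DCACHE_NEED_AUTOMOUNT (derived from S_AUTOMOUNT in __d_instantiate; see the fs/dcache.c hunk below). A minimal sketch of the implementer's side of the contract, assuming a hypothetical "myfs" with a myfs_build_submount() helper:

	static LIST_HEAD(myfs_automount_list);

	static struct vfsmount *myfs_d_automount(struct path *path)
	{
		struct vfsmount *mnt = myfs_build_submount(path); /* assumed helper */

		if (IS_ERR(mnt))
			return mnt;
		/* Second reference, so the mount cannot expire before
		 * finish_automount() has grafted it in. */
		mntget(mnt);
		mnt_set_expiry(mnt, &myfs_automount_list);
		return mnt;
	}

	static const struct dentry_operations myfs_dentry_ops = {
		.d_automount	= myfs_d_automount,
	};

At inode-setup time the filesystem marks directories that cover a submount with inode->i_flags |= S_AUTOMOUNT, exactly as the fs/cifs/inode.c hunk below does.
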
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 897b2b2..851030f 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -93,6 +93,12 @@
 extern const struct dentry_operations cifs_dentry_ops;
 extern const struct dentry_operations cifs_ci_dentry_ops;
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
+extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
+#else
+#define cifs_dfs_d_automount NULL
+#endif
+
 /* Functions related to symlinks */
 extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
 extern void cifs_put_link(struct dentry *direntry,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 1e95dd6..dd5f229 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -675,6 +675,7 @@
 
 const struct dentry_operations cifs_dentry_ops = {
 	.d_revalidate = cifs_d_revalidate,
+	.d_automount = cifs_dfs_d_automount,
 /* d_delete:       cifs_d_delete,      */ /* not needed except for debugging */
 };
 
@@ -711,4 +712,5 @@
 	.d_revalidate = cifs_d_revalidate,
 	.d_hash = cifs_ci_hash,
 	.d_compare = cifs_ci_compare,
+	.d_automount = cifs_dfs_d_automount,
 };
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b06b606..6c9ee80 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -32,7 +32,7 @@
 #include "fscache.h"
 
 
-static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
+static void cifs_set_ops(struct inode *inode)
 {
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 
@@ -60,7 +60,7 @@
 		break;
 	case S_IFDIR:
 #ifdef CONFIG_CIFS_DFS_UPCALL
-		if (is_dfs_referral) {
+		if (IS_AUTOMOUNT(inode)) {
 			inode->i_op = &cifs_dfs_referral_inode_operations;
 		} else {
 #else /* NO DFS support, treat as a directory */
@@ -167,7 +167,9 @@
 	}
 	spin_unlock(&inode->i_lock);
 
-	cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL);
+	if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
+		inode->i_flags |= S_AUTOMOUNT;
+	cifs_set_ops(inode);
 }
 
 void
diff --git a/fs/compat.c b/fs/compat.c
index eb1740a..f6fd0a0 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -257,7 +257,7 @@
 }
 
 /*
- * The following statfs calls are copies of code from fs/open.c and
+ * The following statfs calls are copies of code from fs/statfs.c and
  * should be checked against those from time to time
  */
 asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf)
@@ -320,7 +320,9 @@
 	    __put_user(kbuf->f_namelen, &ubuf->f_namelen) ||
 	    __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) ||
 	    __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) ||
-	    __put_user(kbuf->f_frsize, &ubuf->f_frsize))
+	    __put_user(kbuf->f_frsize, &ubuf->f_frsize) ||
+	    __put_user(kbuf->f_flags, &ubuf->f_flags) ||
+	    __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare)))
 		return -EFAULT;
 	return 0;
 }
@@ -597,10 +599,8 @@
 	if (nr_segs > fast_segs) {
 		ret = -ENOMEM;
 		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
-		if (iov == NULL) {
-			*ret_pointer = fast_pointer;
+		if (iov == NULL)
 			goto out;
-		}
 	}
 	*ret_pointer = iov;
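
fs/compat.c gets three touch-ups above: the stale comment now points at fs/statfs.c, put_compat_statfs64() copies out f_flags and zeroes f_spare like the native path, and the failure path of the compat readv/writev iovec allocation is simplified. The statfs change is visible from userspace; a quick check (note f_flags is only populated on kernels recent enough to fill it in, 2.6.36+ for native statfs, and is left zero by older ones):

	#include <stdio.h>
	#include <sys/vfs.h>		/* statfs(2) */

	int main(void)
	{
		struct statfs buf;

		if (statfs("/", &buf) != 0) {
			perror("statfs");
			return 1;
		}
		/* ST_RDONLY, ST_NOSUID, ... mount flags */
		printf("f_flags = %#lx\n", (unsigned long)buf.f_flags);
		return 0;
	}
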
 
diff --git a/fs/configfs/Kconfig b/fs/configfs/Kconfig
index 13587cc..9febcde 100644
--- a/fs/configfs/Kconfig
+++ b/fs/configfs/Kconfig
@@ -1,8 +1,8 @@
 config CONFIGFS_FS
 	tristate "Userspace-driven configuration filesystem"
-	depends on SYSFS
+	select SYSFS
 	help
-	  configfs is a ram-based filesystem that provides the converse
+	  configfs is a RAM-based filesystem that provides the converse
 	  of sysfs's functionality. Where sysfs is a filesystem-based
 	  view of kernel objects, configfs is a filesystem-based manager
 	  of kernel objects, or config_items.
diff --git a/fs/dcache.c b/fs/dcache.c
index 0c6d5c5..9f493ee 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1357,8 +1357,8 @@
 
 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
 {
-	BUG_ON(dentry->d_op);
-	BUG_ON(dentry->d_flags & (DCACHE_OP_HASH	|
+	WARN_ON_ONCE(dentry->d_op);
+	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
 				DCACHE_OP_COMPARE	|
 				DCACHE_OP_REVALIDATE	|
 				DCACHE_OP_DELETE ));
@@ -1380,8 +1380,11 @@
 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 {
 	spin_lock(&dentry->d_lock);
-	if (inode)
+	if (inode) {
+		if (unlikely(IS_AUTOMOUNT(inode)))
+			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
 		list_add(&dentry->d_alias, &inode->i_dentry);
+	}
 	dentry->d_inode = inode;
 	dentry_rcuwalk_barrier(dentry);
 	spin_unlock(&dentry->d_lock);
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index 2dbb422..1897eb1b 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -1,8 +1,7 @@
 menuconfig DLM
 	tristate "Distributed Lock Manager (DLM)"
 	depends on EXPERIMENTAL && INET
-	depends on SYSFS && (IPV6 || IPV6=n)
-	select CONFIGFS_FS
+	depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
 	select IP_SCTP
 	help
 	A general purpose distributed lock manager for kernel or userspace
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 1de65f5..0c8d97b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2065,7 +2065,7 @@
 extern void ext4_ext_truncate(struct inode *);
 extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
-extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
+extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
 			  loff_t len);
 extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 			  ssize_t len);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c4068f6..63a7581 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3627,14 +3627,15 @@
 }
 
 /*
- * preallocate space for a file. This implements ext4's fallocate inode
+ * preallocate space for a file. This implements ext4's fallocate file
  * operation, which gets called from sys_fallocate system call.
  * For block-mapped files, posix_fallocate should fall back to the method
  * of writing zeroes to the required new blocks (the same behavior which is
  * expected for file systems which do not support fallocate() system call).
  */
-long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
+long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	handle_t *handle;
 	loff_t new_size;
 	unsigned int max_blocks;
@@ -3645,7 +3646,7 @@
 	unsigned int credits, blkbits = inode->i_blkbits;
 
 	/* We only support the FALLOC_FL_KEEP_SIZE mode */
-	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
+	if (mode & ~FALLOC_FL_KEEP_SIZE)
 		return -EOPNOTSUPP;
 
 	/*
@@ -3655,10 +3656,6 @@
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 		return -EOPNOTSUPP;
 
-	/* preallocation to directories is currently not supported */
-	if (S_ISDIR(inode->i_mode))
-		return -ENODEV;
-
 	map.m_lblk = offset >> blkbits;
 	/*
 	 * We can't just convert len to max_blocks because
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index bb003dc..2e8322c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -210,6 +210,7 @@
 	.fsync		= ext4_sync_file,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
+	.fallocate	= ext4_fallocate,
 };
 
 const struct inode_operations ext4_file_inode_operations = {
@@ -223,7 +224,6 @@
 	.removexattr	= generic_removexattr,
 #endif
 	.check_acl	= ext4_check_acl,
-	.fallocate	= ext4_fallocate,
 	.fiemap		= ext4_fiemap,
 };
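
ext4_fallocate is now reached through the file_operations table above, and its mode check is rewritten as a mask test, the idiomatic way to reject unknown flag bits with -EOPNOTSUPP while still accepting FALLOC_FL_KEEP_SIZE. From userspace the call looks like this (sketch; "testfile" is an arbitrary name):

	#define _GNU_SOURCE
	#include <fcntl.h>	/* open, fallocate, FALLOC_FL_KEEP_SIZE */
	#include <stdio.h>

	int main(void)
	{
		int fd = open("testfile", O_RDWR | O_CREAT, 0644);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Preallocate 1 MiB without changing the visible size. */
		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) != 0)
			perror("fallocate");	/* EOPNOTSUPP if unsupported */
		return 0;
	}
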
 
diff --git a/fs/file_table.c b/fs/file_table.c
index c3dee38..c3e89ad 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -311,7 +311,7 @@
 	struct files_struct *files = current->files;
 
 	*fput_needed = 0;
-	if (likely((atomic_read(&files->count) == 1))) {
+	if (atomic_read(&files->count) == 1) {
 		file = fcheck_files(files, fd);
 	} else {
 		rcu_read_lock();
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 68ca487..78b519c 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -4,6 +4,19 @@
 #include <linux/path.h>
 #include <linux/slab.h>
 #include <linux/fs_struct.h>
+#include "internal.h"
+
+static inline void path_get_longterm(struct path *path)
+{
+	path_get(path);
+	mnt_make_longterm(path->mnt);
+}
+
+static inline void path_put_longterm(struct path *path)
+{
+	mnt_make_shortterm(path->mnt);
+	path_put(path);
+}
 
 /*
  * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
@@ -17,11 +30,11 @@
 	write_seqcount_begin(&fs->seq);
 	old_root = fs->root;
 	fs->root = *path;
-	path_get_long(path);
+	path_get_longterm(path);
 	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 	if (old_root.dentry)
-		path_put_long(&old_root);
+		path_put_longterm(&old_root);
 }
 
 /*
@@ -36,12 +49,12 @@
 	write_seqcount_begin(&fs->seq);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
-	path_get_long(path);
+	path_get_longterm(path);
 	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 
 	if (old_pwd.dentry)
-		path_put_long(&old_pwd);
+		path_put_longterm(&old_pwd);
 }
 
 void chroot_fs_refs(struct path *old_root, struct path *new_root)
@@ -59,13 +72,13 @@
 			write_seqcount_begin(&fs->seq);
 			if (fs->root.dentry == old_root->dentry
 			    && fs->root.mnt == old_root->mnt) {
-				path_get_long(new_root);
+				path_get_longterm(new_root);
 				fs->root = *new_root;
 				count++;
 			}
 			if (fs->pwd.dentry == old_root->dentry
 			    && fs->pwd.mnt == old_root->mnt) {
-				path_get_long(new_root);
+				path_get_longterm(new_root);
 				fs->pwd = *new_root;
 				count++;
 			}
@@ -76,13 +89,13 @@
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 	while (count--)
-		path_put_long(old_root);
+		path_put_longterm(old_root);
 }
 
 void free_fs_struct(struct fs_struct *fs)
 {
-	path_put_long(&fs->root);
-	path_put_long(&fs->pwd);
+	path_put_longterm(&fs->root);
+	path_put_longterm(&fs->pwd);
 	kmem_cache_free(fs_cachep, fs);
 }
 
@@ -118,9 +131,9 @@
 
 		spin_lock(&old->lock);
 		fs->root = old->root;
-		path_get_long(&fs->root);
+		path_get_longterm(&fs->root);
 		fs->pwd = old->pwd;
-		path_get_long(&fs->pwd);
+		path_get_longterm(&fs->pwd);
 		spin_unlock(&old->lock);
 	}
 	return fs;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index fca6689..7cfdcb9 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -19,6 +19,8 @@
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/ext2_fs.h>
+#include <linux/falloc.h>
+#include <linux/swap.h>
 #include <linux/crc32.h>
 #include <linux/writeback.h>
 #include <asm/uaccess.h>
@@ -610,6 +612,260 @@
 	return generic_file_aio_write(iocb, iov, nr_segs, pos);
 }
 
+static void empty_write_end(struct page *page, unsigned from,
+			   unsigned to)
+{
+	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+
+	page_zero_new_buffers(page, from, to);
+	flush_dcache_page(page);
+	mark_page_accessed(page);
+
+	if (!gfs2_is_writeback(ip))
+		gfs2_page_add_databufs(ip, page, from, to);
+
+	block_commit_write(page, from, to);
+}
+
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+{
+	unsigned start, end, next;
+	struct buffer_head *bh, *head;
+	int error;
+
+	if (!page_has_buffers(page)) {
+		error = __block_write_begin(page, from, to - from, gfs2_block_map);
+		if (unlikely(error))
+			return error;
+
+		empty_write_end(page, from, to);
+		return 0;
+	}
+
+	bh = head = page_buffers(page);
+	next = end = 0;
+	while (next < from) {
+		next += bh->b_size;
+		bh = bh->b_this_page;
+	}
+	start = next;
+	do {
+		next += bh->b_size;
+		if (buffer_mapped(bh)) {
+			if (end) {
+				error = __block_write_begin(page, start, end - start,
+							    gfs2_block_map);
+				if (unlikely(error))
+					return error;
+				empty_write_end(page, start, end);
+				end = 0;
+			}
+			start = next;
+		}
+		else
+			end = next;
+		bh = bh->b_this_page;
+	} while (next < to);
+
+	if (end) {
+		error = __block_write_begin(page, start, end - start, gfs2_block_map);
+		if (unlikely(error))
+			return error;
+		empty_write_end(page, start, end);
+	}
+
+	return 0;
+}
+
+static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
+			   int mode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct buffer_head *dibh;
+	int error;
+	u64 start = offset >> PAGE_CACHE_SHIFT;
+	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
+	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+	pgoff_t curr;
+	struct page *page;
+	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
+	unsigned int from, to;
+
+	if (!end_offset)
+		end_offset = PAGE_CACHE_SIZE;
+
+	error = gfs2_meta_inode_buffer(ip, &dibh);
+	if (unlikely(error))
+		goto out;
+
+	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+
+	if (gfs2_is_stuffed(ip)) {
+		error = gfs2_unstuff_dinode(ip, NULL);
+		if (unlikely(error))
+			goto out;
+	}
+
+	curr = start;
+	offset = start << PAGE_CACHE_SHIFT;
+	from = start_offset;
+	to = PAGE_CACHE_SIZE;
+	while (curr <= end) {
+		page = grab_cache_page_write_begin(inode->i_mapping, curr,
+						   AOP_FLAG_NOFS);
+		if (unlikely(!page)) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		if (curr == end)
+			to = end_offset;
+		error = write_empty_blocks(page, from, to);
+		if (!error && offset + to > inode->i_size &&
+		    !(mode & FALLOC_FL_KEEP_SIZE)) {
+			i_size_write(inode, offset + to);
+		}
+		unlock_page(page);
+		page_cache_release(page);
+		if (error)
+			goto out;
+		curr++;
+		offset += PAGE_CACHE_SIZE;
+		from = 0;
+	}
+
+	gfs2_dinode_out(ip, dibh->b_data);
+	mark_inode_dirty(inode);
+
+	brelse(dibh);
+
+out:
+	return error;
+}
+
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
+			    unsigned int *data_blocks, unsigned int *ind_blocks)
+{
+	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
+	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
+
+	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
+		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
+		max_data -= tmp;
+	}
+	/* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
+	   so it might end up with fewer data blocks */
+	if (max_data <= *data_blocks)
+		return;
+	*data_blocks = max_data;
+	*ind_blocks = max_blocks - max_data;
+	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
+	if (*len > max) {
+		*len = max;
+		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
+	}
+}
+
+static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
+			   loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_inode *ip = GFS2_I(inode);
+	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+	loff_t bytes, max_bytes;
+	struct gfs2_alloc *al;
+	int error;
+	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
+	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
+
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode & ~FALLOC_FL_KEEP_SIZE)
+		return -EOPNOTSUPP;
+
+	offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
+		 sdp->sd_sb.sb_bsize_shift;
+
+	len = next - offset;
+	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
+	if (!bytes)
+		bytes = UINT_MAX;
+
+	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+	error = gfs2_glock_nq(&ip->i_gh);
+	if (unlikely(error))
+		goto out_uninit;
+
+	if (!gfs2_write_alloc_required(ip, offset, len))
+		goto out_unlock;
+
+	while (len > 0) {
+		if (len < bytes)
+			bytes = len;
+		al = gfs2_alloc_get(ip);
+		if (!al) {
+			error = -ENOMEM;
+			goto out_unlock;
+		}
+
+		error = gfs2_quota_lock_check(ip);
+		if (error)
+			goto out_alloc_put;
+
+retry:
+		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+
+		al->al_requested = data_blocks + ind_blocks;
+		error = gfs2_inplace_reserve(ip);
+		if (error) {
+			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
+				bytes >>= 1;
+				goto retry;
+			}
+			goto out_qunlock;
+		}
+		max_bytes = bytes;
+		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
+		al->al_requested = data_blocks + ind_blocks;
+
+		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
+			  RES_RG_HDR + gfs2_rg_blocks(al);
+		if (gfs2_is_jdata(ip))
+			rblocks += data_blocks ? data_blocks : 1;
+
+		error = gfs2_trans_begin(sdp, rblocks,
+					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+		if (error)
+			goto out_trans_fail;
+
+		error = fallocate_chunk(inode, offset, max_bytes, mode);
+		gfs2_trans_end(sdp);
+
+		if (error)
+			goto out_trans_fail;
+
+		len -= max_bytes;
+		offset += max_bytes;
+		gfs2_inplace_release(ip);
+		gfs2_quota_unlock(ip);
+		gfs2_alloc_put(ip);
+	}
+	goto out_unlock;
+
+out_trans_fail:
+	gfs2_inplace_release(ip);
+out_qunlock:
+	gfs2_quota_unlock(ip);
+out_alloc_put:
+	gfs2_alloc_put(ip);
+out_unlock:
+	gfs2_glock_dq(&ip->i_gh);
+out_uninit:
+	gfs2_holder_uninit(&ip->i_gh);
+	return error;
+}
+
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
 
 /**
@@ -765,6 +1021,7 @@
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
 	.setlease	= gfs2_setlease,
+	.fallocate	= gfs2_fallocate,
 };
 
 const struct file_operations gfs2_dir_fops = {
@@ -794,6 +1051,7 @@
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
 	.setlease	= generic_setlease,
+	.fallocate	= gfs2_fallocate,
 };
 
 const struct file_operations gfs2_dir_fops_nolock = {
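
In gfs2_fallocate above, allocation proceeds in chunks; when gfs2_inplace_reserve() returns -ENOSPC the chunk size is halved and the reservation retried rather than failing the whole call. A self-contained sketch of that halve-and-retry pattern, with reserve() as a stand-in for the real reservation call:

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in for gfs2_inplace_reserve(): pretend only small
	 * reservations can succeed. */
	static int reserve(long long chunk)
	{
		return chunk > 4096 ? -ENOSPC : 0;
	}

	static int alloc_range(long long len, long long chunk, long long min_chunk)
	{
		while (len > 0) {
			if (chunk > len)
				chunk = len;
			while (reserve(chunk) == -ENOSPC) {
				if (chunk <= min_chunk)
					return -ENOSPC;
				chunk >>= 1;	/* retry with a smaller reservation */
			}
			/* ... write and commit the chunk ... */
			len -= chunk;
		}
		return 0;
	}

	int main(void)
	{
		printf("alloc_range: %d\n", alloc_range(1 << 20, 1 << 19, 512));
		return 0;
	}
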
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 040b5a2..d8b26ac 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -18,8 +18,6 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/fiemap.h>
-#include <linux/swap.h>
-#include <linux/falloc.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -1257,261 +1255,6 @@
 	return ret;
 }
 
-static void empty_write_end(struct page *page, unsigned from,
-			   unsigned to)
-{
-	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-
-	page_zero_new_buffers(page, from, to);
-	flush_dcache_page(page);
-	mark_page_accessed(page);
-
-	if (!gfs2_is_writeback(ip))
-		gfs2_page_add_databufs(ip, page, from, to);
-
-	block_commit_write(page, from, to);
-}
-
-
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
-{
-	unsigned start, end, next;
-	struct buffer_head *bh, *head;
-	int error;
-
-	if (!page_has_buffers(page)) {
-		error = __block_write_begin(page, from, to - from, gfs2_block_map);
-		if (unlikely(error))
-			return error;
-
-		empty_write_end(page, from, to);
-		return 0;
-	}
-
-	bh = head = page_buffers(page);
-	next = end = 0;
-	while (next < from) {
-		next += bh->b_size;
-		bh = bh->b_this_page;
-	}
-	start = next;
-	do {
-		next += bh->b_size;
-		if (buffer_mapped(bh)) {
-			if (end) {
-				error = __block_write_begin(page, start, end - start,
-							    gfs2_block_map);
-				if (unlikely(error))
-					return error;
-				empty_write_end(page, start, end);
-				end = 0;
-			}
-			start = next;
-		}
-		else
-			end = next;
-		bh = bh->b_this_page;
-	} while (next < to);
-
-	if (end) {
-		error = __block_write_begin(page, start, end - start, gfs2_block_map);
-		if (unlikely(error))
-			return error;
-		empty_write_end(page, start, end);
-	}
-
-	return 0;
-}
-
-static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
-			   int mode)
-{
-	struct gfs2_inode *ip = GFS2_I(inode);
-	struct buffer_head *dibh;
-	int error;
-	u64 start = offset >> PAGE_CACHE_SHIFT;
-	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
-	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
-	pgoff_t curr;
-	struct page *page;
-	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
-	unsigned int from, to;
-
-	if (!end_offset)
-		end_offset = PAGE_CACHE_SIZE;
-
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-	if (unlikely(error))
-		goto out;
-
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-
-	if (gfs2_is_stuffed(ip)) {
-		error = gfs2_unstuff_dinode(ip, NULL);
-		if (unlikely(error))
-			goto out;
-	}
-
-	curr = start;
-	offset = start << PAGE_CACHE_SHIFT;
-	from = start_offset;
-	to = PAGE_CACHE_SIZE;
-	while (curr <= end) {
-		page = grab_cache_page_write_begin(inode->i_mapping, curr,
-						   AOP_FLAG_NOFS);
-		if (unlikely(!page)) {
-			error = -ENOMEM;
-			goto out;
-		}
-
-		if (curr == end)
-			to = end_offset;
-		error = write_empty_blocks(page, from, to);
-		if (!error && offset + to > inode->i_size &&
-		    !(mode & FALLOC_FL_KEEP_SIZE)) {
-			i_size_write(inode, offset + to);
-		}
-		unlock_page(page);
-		page_cache_release(page);
-		if (error)
-			goto out;
-		curr++;
-		offset += PAGE_CACHE_SIZE;
-		from = 0;
-	}
-
-	gfs2_dinode_out(ip, dibh->b_data);
-	mark_inode_dirty(inode);
-
-	brelse(dibh);
-
-out:
-	return error;
-}
-
-static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
-			    unsigned int *data_blocks, unsigned int *ind_blocks)
-{
-	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
-	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
-
-	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
-		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
-		max_data -= tmp;
-	}
-	/* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
-	   so it might end up with fewer data blocks */
-	if (max_data <= *data_blocks)
-		return;
-	*data_blocks = max_data;
-	*ind_blocks = max_blocks - max_data;
-	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
-	if (*len > max) {
-		*len = max;
-		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
-	}
-}
-
-static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset,
-			   loff_t len)
-{
-	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct gfs2_inode *ip = GFS2_I(inode);
-	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
-	loff_t bytes, max_bytes;
-	struct gfs2_alloc *al;
-	int error;
-	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
-	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
-
-	/* We only support the FALLOC_FL_KEEP_SIZE mode */
-	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
-		return -EOPNOTSUPP;
-
-	offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
-		 sdp->sd_sb.sb_bsize_shift;
-
-	len = next - offset;
-	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
-	if (!bytes)
-		bytes = UINT_MAX;
-
-	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
-	error = gfs2_glock_nq(&ip->i_gh);
-	if (unlikely(error))
-		goto out_uninit;
-
-	if (!gfs2_write_alloc_required(ip, offset, len))
-		goto out_unlock;
-
-	while (len > 0) {
-		if (len < bytes)
-			bytes = len;
-		al = gfs2_alloc_get(ip);
-		if (!al) {
-			error = -ENOMEM;
-			goto out_unlock;
-		}
-
-		error = gfs2_quota_lock_check(ip);
-		if (error)
-			goto out_alloc_put;
-
-retry:
-		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
-
-		al->al_requested = data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip);
-		if (error) {
-			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
-				bytes >>= 1;
-				goto retry;
-			}
-			goto out_qunlock;
-		}
-		max_bytes = bytes;
-		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
-		al->al_requested = data_blocks + ind_blocks;
-
-		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
-			  RES_RG_HDR + gfs2_rg_blocks(al);
-		if (gfs2_is_jdata(ip))
-			rblocks += data_blocks ? data_blocks : 1;
-
-		error = gfs2_trans_begin(sdp, rblocks,
-					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
-		if (error)
-			goto out_trans_fail;
-
-		error = fallocate_chunk(inode, offset, max_bytes, mode);
-		gfs2_trans_end(sdp);
-
-		if (error)
-			goto out_trans_fail;
-
-		len -= max_bytes;
-		offset += max_bytes;
-		gfs2_inplace_release(ip);
-		gfs2_quota_unlock(ip);
-		gfs2_alloc_put(ip);
-	}
-	goto out_unlock;
-
-out_trans_fail:
-	gfs2_inplace_release(ip);
-out_qunlock:
-	gfs2_quota_unlock(ip);
-out_alloc_put:
-	gfs2_alloc_put(ip);
-out_unlock:
-	gfs2_glock_dq(&ip->i_gh);
-out_uninit:
-	gfs2_holder_uninit(&ip->i_gh);
-	return error;
-}
-
-
 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len)
 {
@@ -1562,7 +1305,6 @@
 	.getxattr = gfs2_getxattr,
 	.listxattr = gfs2_listxattr,
 	.removexattr = gfs2_removexattr,
-	.fallocate = gfs2_fallocate,
 	.fiemap = gfs2_fiemap,
 };
 
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 56f0da1..1ae35ba 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -281,7 +281,7 @@
 	    attr->ia_size != i_size_read(inode)) {
 		error = vmtruncate(inode, attr->ia_size);
 		if (error)
-			return error;
+			goto out_unlock;
 	}
 
 	setattr_copy(inode, attr);
diff --git a/fs/internal.h b/fs/internal.h
index 9687c2e..0663568 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -70,6 +70,10 @@
 extern void release_mounts(struct list_head *);
 extern void umount_tree(struct vfsmount *, int, struct list_head *);
 extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
+extern int finish_automount(struct vfsmount *, struct path *);
+
+extern void mnt_make_longterm(struct vfsmount *);
+extern void mnt_make_shortterm(struct vfsmount *);
 
 extern void __init mnt_init(void);
 
diff --git a/fs/ioctl.c b/fs/ioctl.c
index d6cc164..a59635e 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -86,7 +86,7 @@
 			    u64 phys, u64 len, u32 flags)
 {
 	struct fiemap_extent extent;
-	struct fiemap_extent *dest = fieinfo->fi_extents_start;
+	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
 
 	/* only count the extents */
 	if (fieinfo->fi_extents_max == 0) {
@@ -173,6 +173,7 @@
 static int ioctl_fiemap(struct file *filp, unsigned long arg)
 {
 	struct fiemap fiemap;
+	struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
 	struct fiemap_extent_info fieinfo = { 0, };
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	struct super_block *sb = inode->i_sb;
@@ -182,8 +183,7 @@
 	if (!inode->i_op->fiemap)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&fiemap, (struct fiemap __user *)arg,
-			   sizeof(struct fiemap)))
+	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
 		return -EFAULT;
 
 	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
@@ -196,7 +196,7 @@
 
 	fieinfo.fi_flags = fiemap.fm_flags;
 	fieinfo.fi_extents_max = fiemap.fm_extent_count;
-	fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
+	fieinfo.fi_extents_start = ufiemap->fm_extents;
 
 	if (fiemap.fm_extent_count != 0 &&
 	    !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
@@ -209,7 +209,7 @@
 	error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
 	fiemap.fm_flags = fieinfo.fi_flags;
 	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
-	if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
+	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
 		error = -EFAULT;
 
 	return error;
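
The fs/ioctl.c changes above are purely about __user annotations: fi_extents_start and the ioctl argument are now typed as user pointers, and the extent array is addressed through ufiemap->fm_extents instead of raw pointer arithmetic on arg. The userspace view of that same layout is the struct fiemap header immediately followed by the fm_extents flexible array (sketch):

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FS_IOC_FIEMAP */
	#include <linux/fiemap.h>	/* struct fiemap, struct fiemap_extent */

	int main(int argc, char **argv)
	{
		struct fiemap *fm;
		unsigned int i;
		int fd;

		if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return 1;

		/* Header plus room for 32 extents, contiguous in memory just
		 * as the kernel expects them behind the __user pointer. */
		fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
		fm->fm_length = ~0ULL;		/* map the whole file */
		fm->fm_extent_count = 32;

		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
			perror("FS_IOC_FIEMAP");
			return 1;
		}
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("extent %u: logical %llu, %llu bytes\n", i,
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_length);
		return 0;
	}
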
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 85c6be2..3005ec4 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -336,14 +336,13 @@
 	size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
 #ifndef __ECOS
 	if (jffs2_blocks_use_vmalloc(c))
-		c->blocks = vmalloc(size);
+		c->blocks = vzalloc(size);
 	else
 #endif
-		c->blocks = kmalloc(size, GFP_KERNEL);
+		c->blocks = kzalloc(size, GFP_KERNEL);
 	if (!c->blocks)
 		return -ENOMEM;
 
-	memset(c->blocks, 0, size);
 	for (i=0; i<c->nr_blocks; i++) {
 		INIT_LIST_HEAD(&c->blocks[i].list);
 		c->blocks[i].offset = i * c->sector_size;
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index f864005..0bc6a6c 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -144,4 +144,4 @@
 	void *os_priv;
 };
 
-#endif /* _JFFS2_FB_SB */
+#endif /* _JFFS2_FS_SB */
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 9b572ca..4f9cc04 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -151,7 +151,7 @@
 		JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
 			    offset, je32_to_cpu(rx.hdr_crc), crc);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return EIO;
+		return -EIO;
 	}
 	totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
 	if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
@@ -167,7 +167,7 @@
 			    je32_to_cpu(rx.xid), xd->xid,
 			    je32_to_cpu(rx.version), xd->version);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return EIO;
+		return -EIO;
 	}
 	xd->xprefix = rx.xprefix;
 	xd->name_len = rx.name_len;
@@ -230,7 +230,7 @@
 			      ref_offset(xd->node), xd->data_crc, crc);
 		kfree(data);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return EIO;
+		return -EIO;
 	}
 
 	xd->flags |= JFFS2_XFLAGS_HOT;
@@ -268,7 +268,7 @@
 	if (xd->xname)
 		return 0;
 	if (xd->flags & JFFS2_XFLAGS_INVALID)
-		return EIO;
+		return -EIO;
 	if (unlikely(is_xattr_datum_unchecked(c, xd)))
 		rc = do_verify_xattr_datum(c, xd);
 	if (!rc)
@@ -460,7 +460,7 @@
 	if (crc != je32_to_cpu(rr.node_crc)) {
 		JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
 			    offset, je32_to_cpu(rr.node_crc), crc);
-		return EIO;
+		return -EIO;
 	}
 	if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
 	    || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
@@ -470,7 +470,7 @@
 			    offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
 			    je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
 			    je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
-		return EIO;
+		return -EIO;
 	}
 	ref->ino = je32_to_cpu(rr.ino);
 	ref->xid = je32_to_cpu(rr.xid);
diff --git a/fs/locks.c b/fs/locks.c
index 08415b2..0f39982 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -444,15 +444,9 @@
 	fl->fl_file->f_owner.signum = 0;
 }
 
-static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
-{
-	return fl->fl_file == try->fl_file;
-}
-
 static const struct lock_manager_operations lease_manager_ops = {
 	.fl_break = lease_break_callback,
 	.fl_release_private = lease_release_private_callback,
-	.fl_mylease = lease_mylease_callback,
 	.fl_change = lease_modify,
 };
 
@@ -1405,7 +1399,7 @@
 	for (before = &inode->i_flock;
 			((fl = *before) != NULL) && IS_LEASE(fl);
 			before = &fl->fl_next) {
-		if (lease->fl_lmops->fl_mylease(fl, lease))
+		if (fl->fl_file == filp)
 			my_before = before;
 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
 			/*
diff --git a/fs/namei.c b/fs/namei.c
index 8df7a78..b753192 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -368,18 +368,6 @@
 EXPORT_SYMBOL(path_get);
 
 /**
- * path_get_long - get a long reference to a path
- * @path: path to get the reference to
- *
- * Given a path increment the reference count to the dentry and the vfsmount.
- */
-void path_get_long(struct path *path)
-{
-	mntget_long(path->mnt);
-	dget(path->dentry);
-}
-
-/**
  * path_put - put a reference to a path
  * @path: path to put the reference to
  *
@@ -393,18 +381,6 @@
 EXPORT_SYMBOL(path_put);
 
 /**
- * path_put_long - put a long reference to a path
- * @path: path to put the reference to
- *
- * Given a path decrement the reference count to the dentry and the vfsmount.
- */
-void path_put_long(struct path *path)
-{
-	dput(path->dentry);
-	mntput_long(path->mnt);
-}
-
-/**
  * nameidata_drop_rcu - drop this nameidata out of rcu-walk
  * @nd: nameidata pathwalk data to drop
  * Returns: 0 on success, -ECHILD on failure
@@ -800,12 +776,8 @@
 	touch_atime(link->mnt, dentry);
 	nd_set_link(nd, NULL);
 
-	if (link->mnt != nd->path.mnt) {
-		path_to_nameidata(link, nd);
-		nd->inode = nd->path.dentry->d_inode;
-		dget(dentry);
-	}
-	mntget(link->mnt);
+	if (link->mnt == nd->path.mnt)
+		mntget(link->mnt);
 
 	nd->last_type = LAST_BIND;
 	*p = dentry->d_inode->i_op->follow_link(dentry, nd);
@@ -896,54 +868,148 @@
 }
 
 /*
- * serialization is taken care of in namespace.c
+ * Perform an automount
+ * - return -EISDIR to tell follow_managed() to stop and return the path we
+ *   were called with.
  */
-static void __follow_mount_rcu(struct nameidata *nd, struct path *path,
-				struct inode **inode)
+static int follow_automount(struct path *path, unsigned flags,
+			    bool *need_mntput)
 {
-	while (d_mountpoint(path->dentry)) {
-		struct vfsmount *mounted;
-		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
-		if (!mounted)
-			return;
-		path->mnt = mounted;
-		path->dentry = mounted->mnt_root;
-		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
-		*inode = path->dentry->d_inode;
-	}
-}
+	struct vfsmount *mnt;
+	int err;
 
-static int __follow_mount(struct path *path)
-{
-	int res = 0;
-	while (d_mountpoint(path->dentry)) {
-		struct vfsmount *mounted = lookup_mnt(path);
-		if (!mounted)
-			break;
+	if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
+		return -EREMOTE;
+
+	/* We don't want to mount if someone supplied AT_NO_AUTOMOUNT
+	 * and this is the terminal part of the path.
+	 */
+	if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE))
+		return -EISDIR; /* we actually want to stop here */
+
+	/* We want to mount if someone is trying to open/create a file of any
+	 * type under the mountpoint, wants to traverse through the mountpoint
+	 * or wants to open the mounted directory.
+	 *
+	 * We don't want to mount if someone's just doing a stat and they've
+	 * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
+	 * appended a '/' to the name.
+	 */
+	if (!(flags & LOOKUP_FOLLOW) &&
+	    !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY |
+		       LOOKUP_OPEN | LOOKUP_CREATE)))
+		return -EISDIR;
+
+	current->total_link_count++;
+	if (current->total_link_count >= 40)
+		return -ELOOP;
+
+	mnt = path->dentry->d_op->d_automount(path);
+	if (IS_ERR(mnt)) {
+		/*
+		 * The filesystem is allowed to return -EISDIR here to indicate
+		 * it doesn't want to automount.  For instance, autofs would do
+		 * this so that its userspace daemon can mount on this dentry.
+		 *
+		 * However, we can only permit this if it's a terminal point in
+		 * the path being looked up; if it wasn't then the remainder of
+		 * the path is inaccessible and we should say so.
+		 */
+		if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_CONTINUE))
+			return -EREMOTE;
+		return PTR_ERR(mnt);
+	}
+
+	if (!mnt) /* mount collision */
+		return 0;
+
+	err = finish_automount(mnt, path);
+
+	switch (err) {
+	case -EBUSY:
+		/* Someone else made a mount here whilst we were busy */
+		return 0;
+	case 0:
 		dput(path->dentry);
-		if (res)
+		if (*need_mntput)
 			mntput(path->mnt);
-		path->mnt = mounted;
-		path->dentry = dget(mounted->mnt_root);
-		res = 1;
+		path->mnt = mnt;
+		path->dentry = dget(mnt->mnt_root);
+		*need_mntput = true;
+		return 0;
+	default:
+		return err;
 	}
-	return res;
+
 }
 
-static void follow_mount(struct path *path)
+/*
+ * Handle a dentry that is managed in some way.
+ * - Flagged for transit management (autofs)
+ * - Flagged as mountpoint
+ * - Flagged as automount point
+ *
+ * This may only be called in refwalk mode.
+ *
+ * Serialization is taken care of in namespace.c
+ */
+static int follow_managed(struct path *path, unsigned flags)
 {
-	while (d_mountpoint(path->dentry)) {
-		struct vfsmount *mounted = lookup_mnt(path);
-		if (!mounted)
-			break;
-		dput(path->dentry);
-		mntput(path->mnt);
-		path->mnt = mounted;
-		path->dentry = dget(mounted->mnt_root);
+	unsigned managed;
+	bool need_mntput = false;
+	int ret;
+
+	/* Given that we're not holding a lock here, we retain the value in a
+	 * local variable for each dentry as we look at it so that we don't see
+	 * the components of that value change under us */
+	while (managed = ACCESS_ONCE(path->dentry->d_flags),
+	       managed &= DCACHE_MANAGED_DENTRY,
+	       unlikely(managed != 0)) {
+		/* Allow the filesystem to manage the transit without i_mutex
+		 * being held. */
+		if (managed & DCACHE_MANAGE_TRANSIT) {
+			BUG_ON(!path->dentry->d_op);
+			BUG_ON(!path->dentry->d_op->d_manage);
+			ret = path->dentry->d_op->d_manage(path->dentry,
+							   false, false);
+			if (ret < 0)
+				return ret == -EISDIR ? 0 : ret;
+		}
+
+		/* Transit to a mounted filesystem. */
+		if (managed & DCACHE_MOUNTED) {
+			struct vfsmount *mounted = lookup_mnt(path);
+			if (mounted) {
+				dput(path->dentry);
+				if (need_mntput)
+					mntput(path->mnt);
+				path->mnt = mounted;
+				path->dentry = dget(mounted->mnt_root);
+				need_mntput = true;
+				continue;
+			}
+
+			/* Something is mounted on this dentry in another
+			 * namespace and/or whatever was mounted there in this
+			 * namespace got unmounted before we managed to get the
+			 * vfsmount_lock */
+		}
+
+		/* Handle an automount point */
+		if (managed & DCACHE_NEED_AUTOMOUNT) {
+			ret = follow_automount(path, flags, &need_mntput);
+			if (ret < 0)
+				return ret == -EISDIR ? 0 : ret;
+			continue;
+		}
+
+		/* We didn't change the current path point */
+		break;
 	}
+	return 0;
 }
 
-int follow_down(struct path *path)
+int follow_down_one(struct path *path)
 {
 	struct vfsmount *mounted;
 
@@ -958,13 +1024,41 @@
 	return 0;
 }
 
+/*
+ * Skip to top of mountpoint pile in rcuwalk mode.  We abort the rcu-walk if we
+ * meet a managed dentry and we're not walking to "..".  True is returned to
+ * continue, false to abort.
+ */
+static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+			       struct inode **inode, bool reverse_transit)
+{
+	while (d_mountpoint(path->dentry)) {
+		struct vfsmount *mounted;
+		if (unlikely(path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
+		    !reverse_transit &&
+		    path->dentry->d_op->d_manage(path->dentry, false, true) < 0)
+			return false;
+		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
+		if (!mounted)
+			break;
+		path->mnt = mounted;
+		path->dentry = mounted->mnt_root;
+		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+		*inode = path->dentry->d_inode;
+	}
+
+	if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+		return reverse_transit;
+	return true;
+}
+
 static int follow_dotdot_rcu(struct nameidata *nd)
 {
 	struct inode *inode = nd->inode;
 
 	set_root_rcu(nd);
 
-	while(1) {
+	while (1) {
 		if (nd->path.dentry == nd->root.dentry &&
 		    nd->path.mnt == nd->root.mnt) {
 			break;
@@ -987,12 +1081,80 @@
 		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
 		inode = nd->path.dentry->d_inode;
 	}
-	__follow_mount_rcu(nd, &nd->path, &inode);
+	__follow_mount_rcu(nd, &nd->path, &inode, true);
 	nd->inode = inode;
 
 	return 0;
 }
 
+/*
+ * Follow down to the covering mount currently visible to userspace.  At each
+ * point, the filesystem owning that dentry may be queried as to whether the
+ * caller is permitted to proceed or not.
+ *
+ * Care must be taken as namespace_sem may be held (indicated by mounting_here
+ * being true).
+ */
+int follow_down(struct path *path, bool mounting_here)
+{
+	unsigned managed;
+	int ret;
+
+	while (managed = ACCESS_ONCE(path->dentry->d_flags),
+	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
+		/* Allow the filesystem to manage the transit without i_mutex
+		 * being held.
+		 *
+		 * We indicate to the filesystem if someone is trying to mount
+		 * something here.  This gives autofs the chance to deny anyone
+		 * other than its daemon the right to mount on its
+		 * superstructure.
+		 *
+		 * The filesystem may sleep at this point.
+		 */
+		if (managed & DCACHE_MANAGE_TRANSIT) {
+			BUG_ON(!path->dentry->d_op);
+			BUG_ON(!path->dentry->d_op->d_manage);
+			ret = path->dentry->d_op->d_manage(
+				path->dentry, mounting_here, false);
+			if (ret < 0)
+				return ret == -EISDIR ? 0 : ret;
+		}
+
+		/* Transit to a mounted filesystem. */
+		if (managed & DCACHE_MOUNTED) {
+			struct vfsmount *mounted = lookup_mnt(path);
+			if (!mounted)
+				break;
+			dput(path->dentry);
+			mntput(path->mnt);
+			path->mnt = mounted;
+			path->dentry = dget(mounted->mnt_root);
+			continue;
+		}
+
+		/* Don't handle automount points here */
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
+ */
+static void follow_mount(struct path *path)
+{
+	while (d_mountpoint(path->dentry)) {
+		struct vfsmount *mounted = lookup_mnt(path);
+		if (!mounted)
+			break;
+		dput(path->dentry);
+		mntput(path->mnt);
+		path->mnt = mounted;
+		path->dentry = dget(mounted->mnt_root);
+	}
+}
+
 static void follow_dotdot(struct nameidata *nd)
 {
 	set_root(nd);
@@ -1057,12 +1219,14 @@
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry, *parent = nd->path.dentry;
 	struct inode *dir;
+	int err;
+
 	/*
 	 * See if the low-level filesystem might want
 	 * to use its own hash..
 	 */
 	if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
-		int err = parent->d_op->d_hash(parent, nd->inode, name);
+		err = parent->d_op->d_hash(parent, nd->inode, name);
 		if (err < 0)
 			return err;
 	}
@@ -1089,22 +1253,28 @@
 		nd->seq = seq;
 		if (dentry->d_flags & DCACHE_OP_REVALIDATE)
 			goto need_revalidate;
+done2:
 		path->mnt = mnt;
 		path->dentry = dentry;
-		__follow_mount_rcu(nd, path, inode);
-	} else {
-		dentry = __d_lookup(parent, name);
-		if (!dentry)
-			goto need_lookup;
-found:
-		if (dentry->d_flags & DCACHE_OP_REVALIDATE)
-			goto need_revalidate;
-done:
-		path->mnt = mnt;
-		path->dentry = dentry;
-		__follow_mount(path);
-		*inode = path->dentry->d_inode;
+		if (likely(__follow_mount_rcu(nd, path, inode, false)))
+			return 0;
+		if (nameidata_drop_rcu(nd))
+			return -ECHILD;
+		/* fallthru */
 	}
+	dentry = __d_lookup(parent, name);
+	if (!dentry)
+		goto need_lookup;
+found:
+	if (dentry->d_flags & DCACHE_OP_REVALIDATE)
+		goto need_revalidate;
+done:
+	path->mnt = mnt;
+	path->dentry = dentry;
+	err = follow_managed(path, nd->flags);
+	if (unlikely(err < 0))
+		return err;
+	*inode = path->dentry->d_inode;
 	return 0;
 
 need_lookup:
@@ -1143,6 +1313,8 @@
 		goto need_lookup;
 	if (IS_ERR(dentry))
 		goto fail;
+	if (nd->flags & LOOKUP_RCU)
+		goto done2;
 	goto done;
 
 fail:
@@ -1150,17 +1322,6 @@
 }
 
 /*
- * This is a temporary kludge to deal with "automount" symlinks; proper
- * solution is to trigger them on follow_mount(), so that do_lookup()
- * would DTRT.  To be killed before 2.6.34-final.
- */
-static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
-{
-	return inode && unlikely(inode->i_op->follow_link) &&
-		((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
-}
-
-/*
  * Name resolution.
  * This is the basic name resolution function, turning a pathname into
  * the final dentry. We expect 'base' to be positive and a directory.
@@ -1298,7 +1459,8 @@
 		err = do_lookup(nd, &this, &next, &inode);
 		if (err)
 			break;
-		if (follow_on_final(inode, lookup_flags)) {
+		if (inode && unlikely(inode->i_op->follow_link) &&
+		    (lookup_flags & LOOKUP_FOLLOW)) {
 			if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
 				return -ECHILD;
 			BUG_ON(inode != next.dentry->d_inode);
@@ -2200,11 +2362,9 @@
 	if (open_flag & O_EXCL)
 		goto exit_dput;
 
-	if (__follow_mount(path)) {
-		error = -ELOOP;
-		if (open_flag & O_NOFOLLOW)
-			goto exit_dput;
-	}
+	error = follow_managed(path, nd->flags);
+	if (error < 0)
+		goto exit_dput;
 
 	error = -ENOENT;
 	if (!path->dentry->d_inode)
@@ -2353,8 +2513,7 @@
 		struct inode *linki = link.dentry->d_inode;
 		void *cookie;
 		error = -ELOOP;
-		/* S_ISDIR part is a temporary automount kludge */
-		if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(linki->i_mode))
+		if (!(nd.flags & LOOKUP_FOLLOW))
 			goto exit_dput;
 		if (count++ == 32)
 			goto exit_dput;
@@ -3413,6 +3572,7 @@
 };
 
 EXPORT_SYMBOL(user_path_at);
+EXPORT_SYMBOL(follow_down_one);
 EXPORT_SYMBOL(follow_down);
 EXPORT_SYMBOL(follow_up);
 EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
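
follow_automount() above is where the AT_NO_AUTOMOUNT behaviour referenced in its comment lands: when the flag is set and the dentry is the terminal component of the path, the walk stops on the automount point instead of mounting. From userspace that is reached via fstatat() (sketch; glibc headers may not define the flag yet, hence the fallback, which matches the kernel's value):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <fcntl.h>		/* AT_FDCWD */
	#include <sys/stat.h>

	#ifndef AT_NO_AUTOMOUNT
	#define AT_NO_AUTOMOUNT	0x800	/* value from include/linux/fcntl.h */
	#endif

	int main(int argc, char **argv)
	{
		struct stat st;

		if (argc < 2)
			return 1;
		/* Stat the automount point itself without triggering the mount. */
		if (fstatat(AT_FDCWD, argv[1], &st, AT_NO_AUTOMOUNT) != 0) {
			perror("fstatat");
			return 1;
		}
		printf("ino %llu\n", (unsigned long long)st.st_ino);
		return 0;
	}

On kernels without this series, vfs_fstatat() rejects the unknown flag with -EINVAL.
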
diff --git a/fs/namespace.c b/fs/namespace.c
index 3ddfd90..7b0b953 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -183,7 +183,7 @@
 unsigned int mnt_get_count(struct vfsmount *mnt)
 {
 #ifdef CONFIG_SMP
-	unsigned int count = atomic_read(&mnt->mnt_longrefs);
+	unsigned int count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
@@ -217,7 +217,7 @@
 		if (!mnt->mnt_pcp)
 			goto out_free_devname;
 
-		atomic_set(&mnt->mnt_longrefs, 1);
+		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
 #else
 		mnt->mnt_count = 1;
 		mnt->mnt_writers = 0;
@@ -611,6 +611,21 @@
 	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
 }
 
+static inline void __mnt_make_longterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	atomic_inc(&mnt->mnt_longterm);
+#endif
+}
+
+/* needs vfsmount lock for write */
+static inline void __mnt_make_shortterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	atomic_dec(&mnt->mnt_longterm);
+#endif
+}
+
 /*
  * vfsmount lock must be held for write
  */
@@ -624,8 +639,11 @@
 	BUG_ON(parent == mnt);
 
 	list_add_tail(&head, &mnt->mnt_list);
-	list_for_each_entry(m, &head, mnt_list)
+	list_for_each_entry(m, &head, mnt_list) {
 		m->mnt_ns = n;
+		__mnt_make_longterm(m);
+	}
+
 	list_splice(&head, n->list.prev);
 
 	list_add_tail(&mnt->mnt_hash, mount_hashtable +
@@ -734,51 +752,30 @@
 	deactivate_super(sb);
 }
 
-#ifdef CONFIG_SMP
-static inline void __mntput(struct vfsmount *mnt, int longrefs)
+static void mntput_no_expire(struct vfsmount *mnt)
 {
-	if (!longrefs) {
 put_again:
-		br_read_lock(vfsmount_lock);
-		if (likely(atomic_read(&mnt->mnt_longrefs))) {
-			mnt_dec_count(mnt);
-			br_read_unlock(vfsmount_lock);
-			return;
-		}
+#ifdef CONFIG_SMP
+	br_read_lock(vfsmount_lock);
+	if (likely(atomic_read(&mnt->mnt_longterm))) {
+		mnt_dec_count(mnt);
 		br_read_unlock(vfsmount_lock);
-	} else {
-		BUG_ON(!atomic_read(&mnt->mnt_longrefs));
-		if (atomic_add_unless(&mnt->mnt_longrefs, -1, 1))
-			return;
+		return;
 	}
+	br_read_unlock(vfsmount_lock);
 
 	br_write_lock(vfsmount_lock);
-	if (!longrefs)
-		mnt_dec_count(mnt);
-	else
-		atomic_dec(&mnt->mnt_longrefs);
+	mnt_dec_count(mnt);
 	if (mnt_get_count(mnt)) {
 		br_write_unlock(vfsmount_lock);
 		return;
 	}
-	if (unlikely(mnt->mnt_pinned)) {
-		mnt_add_count(mnt, mnt->mnt_pinned + 1);
-		mnt->mnt_pinned = 0;
-		br_write_unlock(vfsmount_lock);
-		acct_auto_close_mnt(mnt);
-		goto put_again;
-	}
-	br_write_unlock(vfsmount_lock);
-	mntfree(mnt);
-}
 #else
-static inline void __mntput(struct vfsmount *mnt, int longrefs)
-{
-put_again:
 	mnt_dec_count(mnt);
 	if (likely(mnt_get_count(mnt)))
 		return;
 	br_write_lock(vfsmount_lock);
+#endif
 	if (unlikely(mnt->mnt_pinned)) {
 		mnt_add_count(mnt, mnt->mnt_pinned + 1);
 		mnt->mnt_pinned = 0;
@@ -789,12 +786,6 @@
 	br_write_unlock(vfsmount_lock);
 	mntfree(mnt);
 }
-#endif
-
-static void mntput_no_expire(struct vfsmount *mnt)
-{
-	__mntput(mnt, 0);
-}
 
 void mntput(struct vfsmount *mnt)
 {
@@ -802,7 +793,7 @@
 		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
 		if (unlikely(mnt->mnt_expiry_mark))
 			mnt->mnt_expiry_mark = 0;
-		__mntput(mnt, 0);
+		mntput_no_expire(mnt);
 	}
 }
 EXPORT_SYMBOL(mntput);
@@ -815,33 +806,6 @@
 }
 EXPORT_SYMBOL(mntget);
 
-void mntput_long(struct vfsmount *mnt)
-{
-#ifdef CONFIG_SMP
-	if (mnt) {
-		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
-		if (unlikely(mnt->mnt_expiry_mark))
-			mnt->mnt_expiry_mark = 0;
-		__mntput(mnt, 1);
-	}
-#else
-	mntput(mnt);
-#endif
-}
-EXPORT_SYMBOL(mntput_long);
-
-struct vfsmount *mntget_long(struct vfsmount *mnt)
-{
-#ifdef CONFIG_SMP
-	if (mnt)
-		atomic_inc(&mnt->mnt_longrefs);
-	return mnt;
-#else
-	return mntget(mnt);
-#endif
-}
-EXPORT_SYMBOL(mntget_long);
-
 void mnt_pin(struct vfsmount *mnt)
 {
 	br_write_lock(vfsmount_lock);
@@ -1216,7 +1180,7 @@
 			dput(dentry);
 			mntput(m);
 		}
-		mntput_long(mnt);
+		mntput(mnt);
 	}
 }
 
@@ -1226,19 +1190,21 @@
  */
 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
 {
+	LIST_HEAD(tmp_list);
 	struct vfsmount *p;
 
 	for (p = mnt; p; p = next_mnt(p, mnt))
-		list_move(&p->mnt_hash, kill);
+		list_move(&p->mnt_hash, &tmp_list);
 
 	if (propagate)
-		propagate_umount(kill);
+		propagate_umount(&tmp_list);
 
-	list_for_each_entry(p, kill, mnt_hash) {
+	list_for_each_entry(p, &tmp_list, mnt_hash) {
 		list_del_init(&p->mnt_expire);
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
 		p->mnt_ns = NULL;
+		__mnt_make_shortterm(p);
 		list_del_init(&p->mnt_child);
 		if (p->mnt_parent != p) {
 			p->mnt_parent->mnt_ghosts++;
@@ -1246,6 +1212,7 @@
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
 	}
+	list_splice(&tmp_list, kill);
 }
 
 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
@@ -1844,9 +1811,10 @@
 		return err;
 
 	down_write(&namespace_sem);
-	while (d_mountpoint(path->dentry) &&
-	       follow_down(path))
-		;
+	err = follow_down(path, true);
+	if (err < 0)
+		goto out;
+
 	err = -EINVAL;
 	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
 		goto out;
@@ -1904,6 +1872,8 @@
 	return err;
 }
 
+static int do_add_mount(struct vfsmount *, struct path *, int);
+
 /*
  * create a new mount for userspace and request it to be added into the
  * namespace's tree
@@ -1912,6 +1882,7 @@
 			int mnt_flags, char *name, void *data)
 {
 	struct vfsmount *mnt;
+	int err;
 
 	if (!type)
 		return -EINVAL;
@@ -1924,15 +1895,47 @@
 	if (IS_ERR(mnt))
 		return PTR_ERR(mnt);
 
-	return do_add_mount(mnt, path, mnt_flags, NULL);
+	err = do_add_mount(mnt, path, mnt_flags);
+	if (err)
+		mntput(mnt);
+	return err;
+}
+
+int finish_automount(struct vfsmount *m, struct path *path)
+{
+	int err;
+	/* The new mount record should have at least 2 refs to prevent it being
+	 * expired before we get a chance to add it
+	 */
+	BUG_ON(mnt_get_count(m) < 2);
+
+	if (m->mnt_sb == path->mnt->mnt_sb &&
+	    m->mnt_root == path->dentry) {
+		err = -ELOOP;
+		goto fail;
+	}
+
+	err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
+	if (!err)
+		return 0;
+fail:
+	/* remove m from any expiration list it may be on */
+	if (!list_empty(&m->mnt_expire)) {
+		down_write(&namespace_sem);
+		br_write_lock(vfsmount_lock);
+		list_del_init(&m->mnt_expire);
+		br_write_unlock(vfsmount_lock);
+		up_write(&namespace_sem);
+	}
+	mntput(m);
+	mntput(m);
+	return err;
 }
 
 /*
  * add a mount into a namespace's mount tree
- * - provide the option of adding the new mount to an expiration list
  */
-int do_add_mount(struct vfsmount *newmnt, struct path *path,
-		 int mnt_flags, struct list_head *fslist)
+static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
 {
 	int err;
 
@@ -1940,9 +1943,10 @@
 
 	down_write(&namespace_sem);
 	/* Something was mounted here while we slept */
-	while (d_mountpoint(path->dentry) &&
-	       follow_down(path))
-		;
+	err = follow_down(path, true);
+	if (err < 0)
+		goto unlock;
+
 	err = -EINVAL;
 	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
 		goto unlock;
@@ -1958,22 +1962,29 @@
 		goto unlock;
 
 	newmnt->mnt_flags = mnt_flags;
-	if ((err = graft_tree(newmnt, path)))
-		goto unlock;
-
-	if (fslist) /* add to the specified expiration list */
-		list_add_tail(&newmnt->mnt_expire, fslist);
-
-	up_write(&namespace_sem);
-	return 0;
+	err = graft_tree(newmnt, path);
 
 unlock:
 	up_write(&namespace_sem);
-	mntput_long(newmnt);
 	return err;
 }
 
-EXPORT_SYMBOL_GPL(do_add_mount);
+/**
+ * mnt_set_expiry - Put a mount on an expiration list
+ * @mnt: The mount to list.
+ * @expiry_list: The list to add the mount to.
+ */
+void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
+{
+	down_write(&namespace_sem);
+	br_write_lock(vfsmount_lock);
+
+	list_add_tail(&mnt->mnt_expire, expiry_list);
+
+	br_write_unlock(vfsmount_lock);
+	up_write(&namespace_sem);
+}
+EXPORT_SYMBOL(mnt_set_expiry);
 
 /*
  * process a list of expirable mountpoints with the intent of discarding any
@@ -2262,6 +2273,22 @@
 	return new_ns;
 }
 
+void mnt_make_longterm(struct vfsmount *mnt)
+{
+	__mnt_make_longterm(mnt);
+}
+
+void mnt_make_shortterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
+		return;
+	br_write_lock(vfsmount_lock);
+	atomic_dec(&mnt->mnt_longterm);
+	br_write_unlock(vfsmount_lock);
+#endif
+}
+
 /*
  * Allocate a new namespace structure and populate it with contents
  * copied from the namespace of the passed in task structure.
@@ -2299,14 +2326,19 @@
 	q = new_ns->root;
 	while (p) {
 		q->mnt_ns = new_ns;
+		__mnt_make_longterm(q);
 		if (fs) {
 			if (p == fs->root.mnt) {
+				fs->root.mnt = mntget(q);
+				__mnt_make_longterm(q);
+				mnt_make_shortterm(p);
 				rootmnt = p;
-				fs->root.mnt = mntget_long(q);
 			}
 			if (p == fs->pwd.mnt) {
+				fs->pwd.mnt = mntget(q);
+				__mnt_make_longterm(q);
+				mnt_make_shortterm(p);
 				pwdmnt = p;
-				fs->pwd.mnt = mntget_long(q);
 			}
 		}
 		p = next_mnt(p, mnt_ns->root);
@@ -2315,9 +2347,9 @@
 	up_write(&namespace_sem);
 
 	if (rootmnt)
-		mntput_long(rootmnt);
+		mntput(rootmnt);
 	if (pwdmnt)
-		mntput_long(pwdmnt);
+		mntput(pwdmnt);
 
 	return new_ns;
 }
@@ -2350,6 +2382,7 @@
 	new_ns = alloc_mnt_ns();
 	if (!IS_ERR(new_ns)) {
 		mnt->mnt_ns = new_ns;
+		__mnt_make_longterm(mnt);
 		new_ns->root = mnt;
 		list_add(&new_ns->list, &new_ns->root->mnt_list);
 	}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index df8c03a..2c3eb33 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -970,7 +970,7 @@
 {
 	struct nfs_server *server = NFS_SERVER(inode);
 
-	if (test_bit(NFS_INO_MOUNTPOINT, &NFS_I(inode)->flags))
+	if (IS_AUTOMOUNT(inode))
 		return 0;
 	if (nd != NULL) {
 		/* VFS wants an on-the-wire revalidation */
@@ -1173,6 +1173,7 @@
 	.d_revalidate	= nfs_lookup_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
+	.d_automount	= nfs_d_automount,
 };
 
 static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
@@ -1246,6 +1247,7 @@
 	.d_revalidate	= nfs_open_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
+	.d_automount	= nfs_d_automount,
 };
 
 /*
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index ce00b70..d851242 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -300,7 +300,7 @@
 				else
 					inode->i_op = &nfs_mountpoint_inode_operations;
 				inode->i_fop = NULL;
-				set_bit(NFS_INO_MOUNTPOINT, &nfsi->flags);
+				inode->i_flags |= S_AUTOMOUNT;
 			}
 		} else if (S_ISLNK(inode->i_mode))
 			inode->i_op = &nfs_symlink_inode_operations;
@@ -1208,7 +1208,7 @@
 	/* Update the fsid? */
 	if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) &&
 			!nfs_fsid_equal(&server->fsid, &fattr->fsid) &&
-			!test_bit(NFS_INO_MOUNTPOINT, &nfsi->flags))
+			!IS_AUTOMOUNT(inode))
 		server->fsid = fattr->fsid;
 
 	/*
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bfa3a34..4644f04 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -252,6 +252,7 @@
 		      const struct dentry *droot,
 		      const struct dentry *dentry,
 		      char *buffer, ssize_t buflen);
+extern struct vfsmount *nfs_d_automount(struct path *path);
 
 /* getroot.c */
 extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 74aaf39..f32b860 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -97,9 +97,8 @@
 }
 
 /*
- * nfs_follow_mountpoint - handle crossing a mountpoint on the server
- * @dentry - dentry of mountpoint
- * @nd - nameidata info
+ * nfs_d_automount - Handle crossing a mountpoint on the server
+ * @path - The mountpoint
  *
  * When we encounter a mountpoint on the server, we want to set up
  * a mountpoint on the client too, to prevent inode numbers from
@@ -109,87 +108,65 @@
  * situation, and that different filesystems may want to use
  * different security flavours.
  */
-static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+struct vfsmount *nfs_d_automount(struct path *path)
 {
 	struct vfsmount *mnt;
-	struct nfs_server *server = NFS_SERVER(dentry->d_inode);
+	struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
 	struct dentry *parent;
 	struct nfs_fh *fh = NULL;
 	struct nfs_fattr *fattr = NULL;
 	int err;
 
-	dprintk("--> nfs_follow_mountpoint()\n");
+	dprintk("--> nfs_d_automount()\n");
 
-	err = -ESTALE;
-	if (IS_ROOT(dentry))
-		goto out_err;
+	mnt = ERR_PTR(-ESTALE);
+	if (IS_ROOT(path->dentry))
+		goto out_nofree;
 
-	err = -ENOMEM;
+	mnt = ERR_PTR(-ENOMEM);
 	fh = nfs_alloc_fhandle();
 	fattr = nfs_alloc_fattr();
 	if (fh == NULL || fattr == NULL)
-		goto out_err;
+		goto out;
 
 	dprintk("%s: enter\n", __func__);
-	dput(nd->path.dentry);
-	nd->path.dentry = dget(dentry);
 
-	/* Look it up again */
-	parent = dget_parent(nd->path.dentry);
+	/* Look it up again to get its attributes */
+	parent = dget_parent(path->dentry);
 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
-						  &nd->path.dentry->d_name,
+						  &path->dentry->d_name,
 						  fh, fattr);
 	dput(parent);
-	if (err != 0)
-		goto out_err;
+	if (err != 0) {
+		mnt = ERR_PTR(err);
+		goto out;
+	}
 
 	if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
-		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
+		mnt = nfs_do_refmount(path->mnt, path->dentry);
 	else
-		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, fh,
-				      fattr);
-	err = PTR_ERR(mnt);
+		mnt = nfs_do_submount(path->mnt, path->dentry, fh, fattr);
 	if (IS_ERR(mnt))
-		goto out_err;
+		goto out;
 
-	mntget(mnt);
-	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
-			   &nfs_automount_list);
-	if (err < 0) {
-		mntput(mnt);
-		if (err == -EBUSY)
-			goto out_follow;
-		goto out_err;
-	}
-	path_put(&nd->path);
-	nd->path.mnt = mnt;
-	nd->path.dentry = dget(mnt->mnt_root);
+	dprintk("%s: done, success\n", __func__);
+	mntget(mnt); /* prevent immediate expiration */
+	mnt_set_expiry(mnt, &nfs_automount_list);
 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+
 out:
 	nfs_free_fattr(fattr);
 	nfs_free_fhandle(fh);
-	dprintk("%s: done, returned %d\n", __func__, err);
-
-	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
-	return ERR_PTR(err);
-out_err:
-	path_put(&nd->path);
-	goto out;
-out_follow:
-	while (d_mountpoint(nd->path.dentry) &&
-	       follow_down(&nd->path))
-		;
-	err = 0;
-	goto out;
+out_nofree:
+	dprintk("<-- nfs_follow_mountpoint() = %p\n", mnt);
+	return mnt;
 }
 
 const struct inode_operations nfs_mountpoint_inode_operations = {
-	.follow_link	= nfs_follow_mountpoint,
 	.getattr	= nfs_getattr,
 };
 
 const struct inode_operations nfs_referral_inode_operations = {
-	.follow_link	= nfs_follow_mountpoint,
 };
 
 static void nfs_expire_automounts(struct work_struct *work)
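
nfs_expire_automounts(), declared just above, is the reaping half of the expiry scheme. A simplified sketch of such a worker (the examplefs_* names are invented; mark_mounts_for_expiry() and schedule_delayed_work() are the real kernel APIs the NFS version uses):

    static LIST_HEAD(examplefs_automount_list);
    static void examplefs_expire_automounts(struct work_struct *work);
    static DECLARE_DELAYED_WORK(examplefs_automount_task,
    			    examplefs_expire_automounts);

    static void examplefs_expire_automounts(struct work_struct *work)
    {
    	struct list_head *list = &examplefs_automount_list;

    	/* unmount anything unused since the last pass */
    	mark_mounts_for_expiry(list);
    	if (!list_empty(list))
    		schedule_delayed_work(&examplefs_automount_task, 120 * HZ);
    }
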
diff --git a/include/linux/nfs4_acl.h b/fs/nfsd/acl.h
similarity index 97%
rename from include/linux/nfs4_acl.h
rename to fs/nfsd/acl.h
index c9c05a7..34e5c40 100644
--- a/include/linux/nfs4_acl.h
+++ b/fs/nfsd/acl.h
@@ -1,6 +1,4 @@
 /*
- *  include/linux/nfs4_acl.c
- *
  *  Common NFSv4 ACL handling definitions.
  *
  *  Copyright (c) 2002 The Regents of the University of Michigan.
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c0fcb7a..8b31e5f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1,4 +1,3 @@
-#define MSNFS	/* HACK HACK */
 /*
  * NFS exporting and validation.
  *
@@ -1444,9 +1443,6 @@
 	{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
 	{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
 	{ NFSEXP_V4ROOT, {"v4root", ""}},
-#ifdef MSNFS
-	{ NFSEXP_MSNFS, {"msnfs", ""}},
-#endif
 	{ 0, {"", ""}}
 };
 
diff --git a/include/linux/nfsd_idmap.h b/fs/nfsd/idmap.h
similarity index 91%
rename from include/linux/nfsd_idmap.h
rename to fs/nfsd/idmap.h
index d4a2ac1..2f3be13 100644
--- a/include/linux/nfsd_idmap.h
+++ b/fs/nfsd/idmap.h
@@ -1,6 +1,4 @@
 /*
- *  include/linux/nfsd_idmap.h
- *
  *  Mapping of UID to name and vice versa.
  *
  *  Copyright (c) 2002, 2003 The Regents of the University of
@@ -56,8 +54,8 @@
 }
 #endif
 
-int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
-int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
+__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
+__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
 int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *);
 int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *);
 
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 5b7e302..2247fc9 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -151,10 +151,10 @@
 	__be32	nfserr;
 	u32	max_blocksize = svc_max_payload(rqstp);
 
-	dprintk("nfsd: READ(3) %s %lu bytes at %lu\n",
+	dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
 				SVCFH_fmt(&argp->fh),
 				(unsigned long) argp->count,
-				(unsigned long) argp->offset);
+				(unsigned long long) argp->offset);
 
 	/* Obtain buffer pointer for payload.
 	 * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
@@ -191,10 +191,10 @@
 	__be32	nfserr;
 	unsigned long cnt = argp->len;
 
-	dprintk("nfsd: WRITE(3)    %s %d bytes at %ld%s\n",
+	dprintk("nfsd: WRITE(3)    %s %d bytes at %Lu%s\n",
 				SVCFH_fmt(&argp->fh),
 				argp->len,
-				(unsigned long) argp->offset,
+				(unsigned long long) argp->offset,
 				argp->stable? " stable" : "");
 
 	fh_copy(&resp->fh, &argp->fh);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index e480526..ad88f1c 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -36,7 +36,7 @@
 
 #include <linux/slab.h>
 #include <linux/nfs_fs.h>
-#include <linux/nfs4_acl.h>
+#include "acl.h"
 
 
 /* mode bit translations: */
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 21a63da..3be975e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -628,10 +628,8 @@
 	return max(nfsd4_lease/10, (time_t)1) * HZ;
 }
 
-/* Reference counting, callback cleanup, etc., all look racy as heck.
- * And why is cl_cb_set an atomic? */
 
-int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
+static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
 	struct rpc_timeout	timeparms = {
 		.to_initval	= max_cb_time(),
@@ -641,6 +639,7 @@
 		.net		= &init_net,
 		.address	= (struct sockaddr *) &conn->cb_addr,
 		.addrsize	= conn->cb_addrlen,
+		.saddress	= (struct sockaddr *) &conn->cb_saddr,
 		.timeout	= &timeparms,
 		.program	= &cb_program,
 		.version	= 0,
@@ -657,6 +656,10 @@
 		args.protocol = XPRT_TRANSPORT_TCP;
 		clp->cl_cb_ident = conn->cb_ident;
 	} else {
+		if (!conn->cb_xprt)
+			return -EINVAL;
+		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+		clp->cl_cb_session = ses;
 		args.bc_xprt = conn->cb_xprt;
 		args.prognumber = clp->cl_cb_session->se_cb_prog;
 		args.protocol = XPRT_TRANSPORT_BC_TCP;
@@ -679,14 +682,20 @@
 		(int)clp->cl_name.len, clp->cl_name.data, reason);
 }
 
+static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
+{
+	clp->cl_cb_state = NFSD4_CB_DOWN;
+	warn_no_callback_path(clp, reason);
+}
+
 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
 
 	if (task->tk_status)
-		warn_no_callback_path(clp, task->tk_status);
+		nfsd4_mark_cb_down(clp, task->tk_status);
 	else
-		atomic_set(&clp->cl_cb_set, 1);
+		clp->cl_cb_state = NFSD4_CB_UP;
 }
 
 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
@@ -709,6 +718,11 @@
 
 static struct workqueue_struct *callback_wq;
 
+static void run_nfsd4_cb(struct nfsd4_callback *cb)
+{
+	queue_work(callback_wq, &cb->cb_work);
+}
+
 static void do_probe_callback(struct nfs4_client *clp)
 {
 	struct nfsd4_callback *cb = &clp->cl_cb_null;
@@ -723,7 +737,7 @@
 
 	cb->cb_ops = &nfsd4_cb_probe_ops;
 
-	queue_work(callback_wq, &cb->cb_work);
+	run_nfsd4_cb(cb);
 }
 
 /*
@@ -732,14 +746,21 @@
  */
 void nfsd4_probe_callback(struct nfs4_client *clp)
 {
+	/* XXX: atomicity?  Also, should we be using cl_cb_flags? */
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
 	do_probe_callback(clp);
 }
 
+void nfsd4_probe_callback_sync(struct nfs4_client *clp)
+{
+	nfsd4_probe_callback(clp);
+	flush_workqueue(callback_wq);
+}
+
 void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
 {
-	BUG_ON(atomic_read(&clp->cl_cb_set));
-
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	spin_lock(&clp->cl_lock);
 	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
 	spin_unlock(&clp->cl_lock);
@@ -750,24 +771,14 @@
  * If the slot is available, then mark it busy.  Otherwise, set the
  * thread for sleeping on the callback RPC wait queue.
  */
-static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
-		struct rpc_task *task)
+static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
 {
-	u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
-	int status = 0;
-
-	dprintk("%s: %u:%u:%u:%u\n", __func__,
-		ptr[0], ptr[1], ptr[2], ptr[3]);
-
 	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
 		dprintk("%s slot is busy\n", __func__);
-		status = -EAGAIN;
-		goto out;
+		return false;
 	}
-out:
-	dprintk("%s status=%d\n", __func__, status);
-	return status;
+	return true;
 }
 
 /*
@@ -780,20 +791,19 @@
 	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
 	struct nfs4_client *clp = dp->dl_client;
 	u32 minorversion = clp->cl_minorversion;
-	int status = 0;
 
 	cb->cb_minorversion = minorversion;
 	if (minorversion) {
-		status = nfsd41_cb_setup_sequence(clp, task);
-		if (status) {
-			if (status != -EAGAIN) {
-				/* terminate rpc task */
-				task->tk_status = status;
-				task->tk_action = NULL;
-			}
+		if (!nfsd41_cb_get_slot(clp, task))
 			return;
-		}
 	}
+	spin_lock(&clp->cl_lock);
+	if (list_empty(&cb->cb_per_client)) {
+		/* This is the first call, not a restart */
+		cb->cb_done = false;
+		list_add(&cb->cb_per_client, &clp->cl_callbacks);
+	}
+	spin_unlock(&clp->cl_lock);
 	rpc_call_start(task);
 }
 
@@ -829,15 +839,18 @@
 
 	nfsd4_cb_done(task, calldata);
 
-	if (current_rpc_client == NULL) {
-		/* We're shutting down; give up. */
-		/* XXX: err, or is it ok just to fall through
-		 * and rpc_restart_call? */
+	if (current_rpc_client != task->tk_client) {
+		/* We're shutting down or changing cl_cb_client; leave
+		 * it to nfsd4_process_cb_update to restart the call if
+		 * necessary. */
 		return;
 	}
 
+	if (cb->cb_done)
+		return;
 	switch (task->tk_status) {
 	case 0:
+		cb->cb_done = true;
 		return;
 	case -EBADHANDLE:
 	case -NFS4ERR_BAD_STATEID:
@@ -846,32 +859,30 @@
 		break;
 	default:
 		/* Network partition? */
-		atomic_set(&clp->cl_cb_set, 0);
-		warn_no_callback_path(clp, task->tk_status);
-		if (current_rpc_client != task->tk_client) {
-			/* queue a callback on the new connection: */
-			atomic_inc(&dp->dl_count);
-			nfsd4_cb_recall(dp);
-			return;
-		}
+		nfsd4_mark_cb_down(clp, task->tk_status);
 	}
 	if (dp->dl_retries--) {
 		rpc_delay(task, 2*HZ);
 		task->tk_status = 0;
 		rpc_restart_call_prepare(task);
 		return;
-	} else {
-		atomic_set(&clp->cl_cb_set, 0);
-		warn_no_callback_path(clp, task->tk_status);
 	}
+	nfsd4_mark_cb_down(clp, task->tk_status);
+	cb->cb_done = true;
 }
 
 static void nfsd4_cb_recall_release(void *calldata)
 {
 	struct nfsd4_callback *cb = calldata;
+	struct nfs4_client *clp = cb->cb_clp;
 	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
 
-	nfs4_put_delegation(dp);
+	if (cb->cb_done) {
+		spin_lock(&clp->cl_lock);
+		list_del(&cb->cb_per_client);
+		spin_unlock(&clp->cl_lock);
+		nfs4_put_delegation(dp);
+	}
 }
 
 static const struct rpc_call_ops nfsd4_cb_recall_ops = {
@@ -906,16 +917,33 @@
 	flush_workqueue(callback_wq);
 }
 
-void nfsd4_release_cb(struct nfsd4_callback *cb)
+static void nfsd4_release_cb(struct nfsd4_callback *cb)
 {
 	if (cb->cb_ops->rpc_release)
 		cb->cb_ops->rpc_release(cb);
 }
 
-void nfsd4_process_cb_update(struct nfsd4_callback *cb)
+/* requires cl_lock: */
+static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
+{
+	struct nfsd4_session *s;
+	struct nfsd4_conn *c;
+
+	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
+		list_for_each_entry(c, &s->se_conns, cn_persession) {
+			if (c->cn_flags & NFS4_CDFC4_BACK)
+				return c;
+		}
+	}
+	return NULL;
+}
+
+static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
 {
 	struct nfs4_cb_conn conn;
 	struct nfs4_client *clp = cb->cb_clp;
+	struct nfsd4_session *ses = NULL;
+	struct nfsd4_conn *c;
 	int err;
 
 	/*
@@ -926,6 +954,10 @@
 		rpc_shutdown_client(clp->cl_cb_client);
 		clp->cl_cb_client = NULL;
 	}
+	if (clp->cl_cb_conn.cb_xprt) {
+		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+		clp->cl_cb_conn.cb_xprt = NULL;
+	}
 	if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
 		return;
 	spin_lock(&clp->cl_lock);
@@ -936,11 +968,22 @@
 	BUG_ON(!clp->cl_cb_flags);
 	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
 	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
+	c = __nfsd4_find_backchannel(clp);
+	if (c) {
+		svc_xprt_get(c->cn_xprt);
+		conn.cb_xprt = c->cn_xprt;
+		ses = c->cn_session;
+	}
 	spin_unlock(&clp->cl_lock);
 
-	err = setup_callback_client(clp, &conn);
-	if (err)
+	err = setup_callback_client(clp, &conn, ses);
+	if (err) {
 		warn_no_callback_path(clp, err);
+		return;
+	}
+	/* Yay, the callback channel's back! Restart any callbacks: */
+	list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
+		run_nfsd4_cb(cb);
 }
 
 void nfsd4_do_callback_rpc(struct work_struct *w)
@@ -965,10 +1008,11 @@
 void nfsd4_cb_recall(struct nfs4_delegation *dp)
 {
 	struct nfsd4_callback *cb = &dp->dl_recall;
+	struct nfs4_client *clp = dp->dl_client;
 
 	dp->dl_retries = 1;
 	cb->cb_op = dp;
-	cb->cb_clp = dp->dl_client;
+	cb->cb_clp = clp;
 	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
 	cb->cb_msg.rpc_argp = cb;
 	cb->cb_msg.rpc_resp = cb;
@@ -977,5 +1021,8 @@
 	cb->cb_ops = &nfsd4_cb_recall_ops;
 	dp->dl_retries = 1;
 
-	queue_work(callback_wq, &dp->dl_recall.cb_work);
+	INIT_LIST_HEAD(&cb->cb_per_client);
+	cb->cb_done = true;
+
+	run_nfsd4_cb(&dp->dl_recall);
 }
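
The cb_done/cb_per_client pairing above encodes an ownership rule: only a finished callback may unlink itself and free its state, while an unfinished one stays on clp->cl_callbacks so nfsd4_process_cb_update() can requeue it after the channel is rebuilt. A sketch of the release side of that rule (simplified from nfsd4_cb_recall_release() above; example_cb_release is an invented name):

    static void example_cb_release(struct nfsd4_callback *cb)
    {
    	struct nfs4_client *clp = cb->cb_clp;

    	if (!cb->cb_done)
    		return;		/* still owned by the restart logic */

    	spin_lock(&clp->cl_lock);
    	list_del(&cb->cb_per_client);
    	spin_unlock(&clp->cl_lock);
    	/* drop per-callback resources here */
    }
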
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index f0695e8..6d2c397 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -33,10 +33,11 @@
  */
 
 #include <linux/module.h>
-#include <linux/nfsd_idmap.h>
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include "idmap.h"
+#include "nfsd.h"
 
 /*
  * Cache entry
@@ -514,7 +515,7 @@
 	return clp->name;
 }
 
-static int
+static __be32
 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
 		uid_t *id)
 {
@@ -524,15 +525,15 @@
 	int ret;
 
 	if (namelen + 1 > sizeof(key.name))
-		return -EINVAL;
+		return nfserr_badowner;
 	memcpy(key.name, name, namelen);
 	key.name[namelen] = '\0';
 	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
 	ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
 	if (ret == -ENOENT)
-		ret = -ESRCH; /* nfserr_badname */
+		return nfserr_badowner;
 	if (ret)
-		return ret;
+		return nfserrno(ret);
 	*id = item->id;
 	cache_put(&item->h, &nametoid_cache);
 	return 0;
@@ -560,14 +561,14 @@
 	return ret;
 }
 
-int
+__be32
 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
 		__u32 *id)
 {
 	return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
 }
 
-int
+__be32
 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
 		__u32 *id)
 {
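
With nfsd_map_name_to_uid() and nfsd_map_name_to_gid() now returning __be32 nfsd status codes (network byte order) instead of host errnos, callers can hand the value straight back to the XDR layer. An invented caller to illustrate the point (no nfserrno() translation needed):

    static __be32 example_decode_owner(struct svc_rqst *rqstp,
    				   const char *name, size_t len, __u32 *uid)
    {
    	/* nfs_ok on success, nfserr_badowner etc. on failure */
    	return nfsd_map_name_to_uid(rqstp, name, len, uid);
    }
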
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0cdfd02..db52546 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -604,9 +604,7 @@
 	return status;
 }
 
-static __be32
-nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
-	      void *arg)
+static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
 {
 	struct svc_fh tmp_fh;
 	__be32 ret;
@@ -615,13 +613,19 @@
 	ret = exp_pseudoroot(rqstp, &tmp_fh);
 	if (ret)
 		return ret;
-	if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
+	if (tmp_fh.fh_dentry == fh->fh_dentry) {
 		fh_put(&tmp_fh);
 		return nfserr_noent;
 	}
 	fh_put(&tmp_fh);
-	return nfsd_lookup(rqstp, &cstate->current_fh,
-			   "..", 2, &cstate->current_fh);
+	return nfsd_lookup(rqstp, fh, "..", 2, fh);
+}
+
+static __be32
+nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+	      void *arg)
+{
+	return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
 }
 
 static __be32
@@ -769,10 +773,36 @@
 	} else
 		secinfo->si_exp = exp;
 	dput(dentry);
+	if (cstate->minorversion)
+		/* See rfc 5661 section 2.6.3.1.1.8 */
+		fh_put(&cstate->current_fh);
 	return err;
 }
 
 static __be32
+nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+	      struct nfsd4_secinfo_no_name *sin)
+{
+	__be32 err;
+
+	switch (sin->sin_style) {
+	case NFS4_SECINFO_STYLE4_CURRENT_FH:
+		break;
+	case NFS4_SECINFO_STYLE4_PARENT:
+		err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
+		if (err)
+			return err;
+		break;
+	default:
+		return nfserr_inval;
+	}
+	exp_get(cstate->current_fh.fh_export);
+	sin->sin_exp = cstate->current_fh.fh_export;
+	fh_put(&cstate->current_fh);
+	return nfs_ok;
+}
+
+static __be32
 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	      struct nfsd4_setattr *setattr)
 {
@@ -974,8 +1004,8 @@
  * Also note, enforced elsewhere:
  *	- SEQUENCE other than as first op results in
  *	  NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
- *	- BIND_CONN_TO_SESSION must be the only op in its compound
- *	  (Will be enforced in nfsd4_bind_conn_to_session().)
+ *	- BIND_CONN_TO_SESSION must be the only op in its compound.
+ *	  (Enforced in nfsd4_bind_conn_to_session().)
  *	- DESTROY_SESSION must be the final operation in a compound, if
  *	  sessionid's in SEQUENCE and DESTROY_SESSION are the same.
  *	  (Enforced in nfsd4_destroy_session().)
@@ -1126,10 +1156,6 @@
 
 		nfsd4_increment_op_stats(op->opnum);
 	}
-	if (!rqstp->rq_usedeferral && status == nfserr_dropit) {
-		dprintk("%s Dropit - send NFS4ERR_DELAY\n", __func__);
-		status = nfserr_jukebox;
-	}
 
 	resp->cstate.status = status;
 	fh_put(&resp->cstate.current_fh);
@@ -1300,6 +1326,11 @@
 		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
 		.op_name = "OP_EXCHANGE_ID",
 	},
+	[OP_BIND_CONN_TO_SESSION] = {
+		.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
+		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+		.op_name = "OP_BIND_CONN_TO_SESSION",
+	},
 	[OP_CREATE_SESSION] = {
 		.op_func = (nfsd4op_func)nfsd4_create_session,
 		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
@@ -1320,6 +1351,10 @@
 		.op_flags = ALLOWED_WITHOUT_FH,
 		.op_name = "OP_RECLAIM_COMPLETE",
 	},
+	[OP_SECINFO_NO_NAME] = {
+		.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
+		.op_name = "OP_SECINFO_NO_NAME",
+	},
 };
 
 static const char *nfsd4_op_name(unsigned opnum)
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 7e26caa..ffb59ef 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -302,7 +302,6 @@
 {
 	int status;
 
-	/* note: we currently use this path only for minorversion 0 */
 	if (nfs4_has_reclaimed_state(child->d_name.name, false))
 		return 0;
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index fbd18c3..d98d021 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -230,7 +230,8 @@
 	dp->dl_client = clp;
 	get_nfs4_file(fp);
 	dp->dl_file = fp;
-	nfs4_file_get_access(fp, O_RDONLY);
+	dp->dl_vfs_file = find_readable_file(fp);
+	get_file(dp->dl_vfs_file);
 	dp->dl_flock = NULL;
 	dp->dl_type = type;
 	dp->dl_stateid.si_boot = boot_time;
@@ -252,6 +253,7 @@
 	if (atomic_dec_and_test(&dp->dl_count)) {
 		dprintk("NFSD: freeing dp %p\n",dp);
 		put_nfs4_file(dp->dl_file);
+		fput(dp->dl_vfs_file);
 		kmem_cache_free(deleg_slab, dp);
 		num_delegations--;
 	}
@@ -265,12 +267,10 @@
 static void
 nfs4_close_delegation(struct nfs4_delegation *dp)
 {
-	struct file *filp = find_readable_file(dp->dl_file);
-
 	dprintk("NFSD: close_delegation dp %p\n",dp);
+	/* XXX: do we even need this check?: */
 	if (dp->dl_flock)
-		vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
-	nfs4_file_put_access(dp->dl_file, O_RDONLY);
+		vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock);
 }
 
 /* Called under the state lock. */
@@ -642,6 +642,7 @@
 		free_conn(c);
 	}
 	spin_unlock(&clp->cl_lock);
+	nfsd4_probe_callback(clp);
 }
 
 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
@@ -679,15 +680,12 @@
 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
 }
 
-static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
+static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
 {
 	struct nfsd4_conn *conn;
-	u32 flags = NFS4_CDFC4_FORE;
 	int ret;
 
-	if (ses->se_flags & SESSION4_BACK_CHAN)
-		flags |= NFS4_CDFC4_BACK;
-	conn = alloc_conn(rqstp, flags);
+	conn = alloc_conn(rqstp, dir);
 	if (!conn)
 		return nfserr_jukebox;
 	nfsd4_hash_conn(conn, ses);
@@ -698,6 +696,17 @@
 	return nfs_ok;
 }
 
+static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
+{
+	u32 dir = NFS4_CDFC4_FORE;
+
+	if (ses->se_flags & SESSION4_BACK_CHAN)
+		dir |= NFS4_CDFC4_BACK;
+
+	return nfsd4_new_conn(rqstp, ses, dir);
+}
+
+/* must be called under client_lock */
 static void nfsd4_del_conns(struct nfsd4_session *s)
 {
 	struct nfs4_client *clp = s->se_client;
@@ -749,6 +758,8 @@
 	 */
 	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
 	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
+	if (numslots < 1)
+		return NULL;
 
 	new = alloc_session(slotsize, numslots);
 	if (!new) {
@@ -769,25 +780,30 @@
 	idx = hash_sessionid(&new->se_sessionid);
 	spin_lock(&client_lock);
 	list_add(&new->se_hash, &sessionid_hashtbl[idx]);
+	spin_lock(&clp->cl_lock);
 	list_add(&new->se_perclnt, &clp->cl_sessions);
+	spin_unlock(&clp->cl_lock);
 	spin_unlock(&client_lock);
 
-	status = nfsd4_new_conn(rqstp, new);
+	status = nfsd4_new_conn_from_crses(rqstp, new);
 	/* whoops: benny points out, status is ignored! (err, or bogus) */
 	if (status) {
 		free_session(&new->se_ref);
 		return NULL;
 	}
-	if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) {
+	if (cses->flags & SESSION4_BACK_CHAN) {
 		struct sockaddr *sa = svc_addr(rqstp);
-
-		clp->cl_cb_session = new;
-		clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
-		svc_xprt_get(rqstp->rq_xprt);
+		/*
+		 * This is a little silly; with sessions there's no real
+		 * use for the callback address.  Use the peer address
+		 * as a reasonable default for now, but consider fixing
+		 * the rpc client not to require an address in the
+		 * future:
+		 */
 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
-		nfsd4_probe_callback(clp);
 	}
+	nfsd4_probe_callback(clp);
 	return new;
 }
 
@@ -817,7 +833,9 @@
 unhash_session(struct nfsd4_session *ses)
 {
 	list_del(&ses->se_hash);
+	spin_lock(&ses->se_client->cl_lock);
 	list_del(&ses->se_perclnt);
+	spin_unlock(&ses->se_client->cl_lock);
 }
 
 /* must be called under the client_lock */
@@ -923,8 +941,10 @@
 
 	mark_client_expired(clp);
 	list_del(&clp->cl_lru);
+	spin_lock(&clp->cl_lock);
 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
 		list_del_init(&ses->se_hash);
+	spin_unlock(&clp->cl_lock);
 }
 
 static void
@@ -1051,12 +1071,13 @@
 
 	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
 	atomic_set(&clp->cl_refcount, 0);
-	atomic_set(&clp->cl_cb_set, 0);
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	INIT_LIST_HEAD(&clp->cl_idhash);
 	INIT_LIST_HEAD(&clp->cl_strhash);
 	INIT_LIST_HEAD(&clp->cl_openowners);
 	INIT_LIST_HEAD(&clp->cl_delegations);
 	INIT_LIST_HEAD(&clp->cl_lru);
+	INIT_LIST_HEAD(&clp->cl_callbacks);
 	spin_lock_init(&clp->cl_lock);
 	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
 	clp->cl_time = get_seconds();
@@ -1132,54 +1153,55 @@
 	return NULL;
 }
 
-/*
- * Return 1 iff clp's clientid establishment method matches the use_exchange_id
- * parameter. Matching is based on the fact the at least one of the
- * EXCHGID4_FLAG_USE_{NON_PNFS,PNFS_MDS,PNFS_DS} flags must be set for v4.1
- *
- * FIXME: we need to unify the clientid namespaces for nfsv4.x
- * and correctly deal with client upgrade/downgrade in EXCHANGE_ID
- * and SET_CLIENTID{,_CONFIRM}
- */
-static inline int
-match_clientid_establishment(struct nfs4_client *clp, bool use_exchange_id)
+static bool clp_used_exchangeid(struct nfs4_client *clp)
 {
-	bool has_exchange_flags = (clp->cl_exchange_flags != 0);
-	return use_exchange_id == has_exchange_flags;
-}
+	return clp->cl_exchange_flags != 0;
+}
 
 static struct nfs4_client *
-find_confirmed_client_by_str(const char *dname, unsigned int hashval,
-			     bool use_exchange_id)
+find_confirmed_client_by_str(const char *dname, unsigned int hashval)
 {
 	struct nfs4_client *clp;
 
 	list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
-		if (same_name(clp->cl_recdir, dname) &&
-		    match_clientid_establishment(clp, use_exchange_id))
+		if (same_name(clp->cl_recdir, dname))
 			return clp;
 	}
 	return NULL;
 }
 
 static struct nfs4_client *
-find_unconfirmed_client_by_str(const char *dname, unsigned int hashval,
-			       bool use_exchange_id)
+find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
 {
 	struct nfs4_client *clp;
 
 	list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
-		if (same_name(clp->cl_recdir, dname) &&
-		    match_clientid_establishment(clp, use_exchange_id))
+		if (same_name(clp->cl_recdir, dname))
 			return clp;
 	}
 	return NULL;
 }
 
+static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr)
+{
+	switch (family) {
+	case AF_INET:
+		((struct sockaddr_in *)sa)->sin_family = AF_INET;
+		((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr;
+		return;
+	case AF_INET6:
+		((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6;
+		((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6;
+		return;
+	}
+}
+
 static void
-gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
+gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
 {
 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
+	struct sockaddr	*sa = svc_addr(rqstp);
+	u32 scopeid = rpc_get_scope_id(sa);
 	unsigned short expected_family;
 
 	/* Currently, we only support tcp and tcp6 for the callback channel */
@@ -1205,6 +1227,7 @@
 
 	conn->cb_prog = se->se_callback_prog;
 	conn->cb_ident = se->se_callback_ident;
+	rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr);
 	return;
 out_err:
 	conn->cb_addr.ss_family = AF_UNSPEC;
@@ -1344,7 +1367,7 @@
 	case SP4_NONE:
 		break;
 	case SP4_SSV:
-		return nfserr_encr_alg_unsupp;
+		return nfserr_serverfault;
 	default:
 		BUG();				/* checked by xdr code */
 	case SP4_MACH_CRED:
@@ -1361,8 +1384,12 @@
 	nfs4_lock_state();
 	status = nfs_ok;
 
-	conf = find_confirmed_client_by_str(dname, strhashval, true);
+	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
+		if (!clp_used_exchangeid(conf)) {
+			status = nfserr_clid_inuse; /* XXX: ? */
+			goto out;
+		}
 		if (!same_verf(&verf, &conf->cl_verifier)) {
 			/* 18.35.4 case 8 */
 			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
@@ -1403,7 +1430,7 @@
 		goto out;
 	}
 
-	unconf  = find_unconfirmed_client_by_str(dname, strhashval, true);
+	unconf  = find_unconfirmed_client_by_str(dname, strhashval);
 	if (unconf) {
 		/*
 		 * Possible retry or client restart.  Per 18.35.4 case 4,
@@ -1560,6 +1587,8 @@
 	status = nfs_ok;
 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
 	       NFS4_MAX_SESSIONID_LEN);
+	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
+		sizeof(struct nfsd4_channel_attrs));
 	cs_slot->sl_seqid++;
 	cr_ses->seqid = cs_slot->sl_seqid;
 
@@ -1581,6 +1610,45 @@
 	return argp->opcnt == resp->opcnt;
 }
 
+static __be32 nfsd4_map_bcts_dir(u32 *dir)
+{
+	switch (*dir) {
+	case NFS4_CDFC4_FORE:
+	case NFS4_CDFC4_BACK:
+		return nfs_ok;
+	case NFS4_CDFC4_FORE_OR_BOTH:
+	case NFS4_CDFC4_BACK_OR_BOTH:
+		*dir = NFS4_CDFC4_BOTH;
+		return nfs_ok;
+	}
+	return nfserr_inval;
+}
+
+__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
+		     struct nfsd4_compound_state *cstate,
+		     struct nfsd4_bind_conn_to_session *bcts)
+{
+	__be32 status;
+
+	if (!nfsd4_last_compound_op(rqstp))
+		return nfserr_not_only_op;
+	spin_lock(&client_lock);
+	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
+	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
+	 * client_lock itself: */
+	if (cstate->session) {
+		nfsd4_get_session(cstate->session);
+		atomic_inc(&cstate->session->se_client->cl_refcount);
+	}
+	spin_unlock(&client_lock);
+	if (!cstate->session)
+		return nfserr_badsession;
+
+	status = nfsd4_map_bcts_dir(&bcts->dir);
+	if (!status)
+		status = nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
+	return status;
+}
+
 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
 {
 	if (!session)
@@ -1619,8 +1687,7 @@
 	spin_unlock(&client_lock);
 
 	nfs4_lock_state();
-	/* wait for callbacks */
-	nfsd4_shutdown_callback(ses->se_client);
+	nfsd4_probe_callback_sync(ses->se_client);
 	nfs4_unlock_state();
 
 	nfsd4_del_conns(ses);
@@ -1733,8 +1800,12 @@
 out:
 	/* Hold a session reference until done processing the compound. */
 	if (cstate->session) {
+		struct nfs4_client *clp = session->se_client;
+
 		nfsd4_get_session(cstate->session);
-		atomic_inc(&session->se_client->cl_refcount);
+		atomic_inc(&clp->cl_refcount);
+		if (clp->cl_cb_state == NFSD4_CB_DOWN)
+			seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
 	}
 	kfree(conn);
 	spin_unlock(&client_lock);
@@ -1775,7 +1846,6 @@
 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		  struct nfsd4_setclientid *setclid)
 {
-	struct sockaddr		*sa = svc_addr(rqstp);
 	struct xdr_netobj 	clname = { 
 		.len = setclid->se_namelen,
 		.data = setclid->se_name,
@@ -1801,10 +1871,12 @@
 	strhashval = clientstr_hashval(dname);
 
 	nfs4_lock_state();
-	conf = find_confirmed_client_by_str(dname, strhashval, false);
+	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
 		/* RFC 3530 14.2.33 CASE 0: */
 		status = nfserr_clid_inuse;
+		if (clp_used_exchangeid(conf))
+			goto out;
 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
 			char addr_str[INET6_ADDRSTRLEN];
 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
@@ -1819,7 +1891,7 @@
 	 * has a description of SETCLIENTID request processing consisting
 	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
 	 */
-	unconf = find_unconfirmed_client_by_str(dname, strhashval, false);
+	unconf = find_unconfirmed_client_by_str(dname, strhashval);
 	status = nfserr_resource;
 	if (!conf) {
 		/*
@@ -1876,7 +1948,7 @@
 	 * for consistent minorversion use throughout:
 	 */
 	new->cl_minorversion = 0;
-	gen_callback(new, setclid, rpc_get_scope_id(sa));
+	gen_callback(new, setclid, rqstp);
 	add_to_unconfirmed(new, strhashval);
 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
@@ -1935,7 +2007,6 @@
 		if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
 			status = nfserr_clid_inuse;
 		else {
-			atomic_set(&conf->cl_cb_set, 0);
 			nfsd4_change_callback(conf, &unconf->cl_cb_conn);
 			nfsd4_probe_callback(conf);
 			expire_client(unconf);
@@ -1964,7 +2035,7 @@
 			unsigned int hash =
 				clientstr_hashval(unconf->cl_recdir);
 			conf = find_confirmed_client_by_str(unconf->cl_recdir,
-							    hash, false);
+							    hash);
 			if (conf) {
 				nfsd4_remove_clid_dir(conf);
 				expire_client(conf);
@@ -2300,41 +2371,6 @@
 	nfsd4_cb_recall(dp);
 }
 
-/*
- * The file_lock is being reapd.
- *
- * Called by locks_free_lock() with lock_flocks() held.
- */
-static
-void nfsd_release_deleg_cb(struct file_lock *fl)
-{
-	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
-
-	dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count));
-
-	if (!(fl->fl_flags & FL_LEASE) || !dp)
-		return;
-	dp->dl_flock = NULL;
-}
-
-/*
- * Called from setlease() with lock_flocks() held
- */
-static
-int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
-{
-	struct nfs4_delegation *onlistd =
-		(struct nfs4_delegation *)onlist->fl_owner;
-	struct nfs4_delegation *tryd =
-		(struct nfs4_delegation *)try->fl_owner;
-
-	if (onlist->fl_lmops != try->fl_lmops)
-		return 0;
-
-	return onlistd->dl_client == tryd->dl_client;
-}
-
-
 static
 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
 {
@@ -2346,8 +2382,6 @@
 
 static const struct lock_manager_operations nfsd_lease_mng_ops = {
 	.fl_break = nfsd_break_deleg_cb,
-	.fl_release_private = nfsd_release_deleg_cb,
-	.fl_mylease = nfsd_same_client_deleg_cb,
 	.fl_change = nfsd_change_deleg_cb,
 };
 
@@ -2514,8 +2548,6 @@
 	if (!fp->fi_fds[oflag]) {
 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
 			&fp->fi_fds[oflag]);
-		if (status == nfserr_dropit)
-			status = nfserr_jukebox;
 		if (status)
 			return status;
 	}
@@ -2596,6 +2628,19 @@
 	open->op_stateowner->so_client->cl_firststate = 1;
 }
 
+/* Should we give out recallable state?: */
+static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
+{
+	if (clp->cl_cb_state == NFSD4_CB_UP)
+		return true;
+	/*
+	 * In the sessions case, since we don't have to establish a
+	 * separate connection for callbacks, we assume it's OK
+	 * until we hear otherwise:
+	 */
+	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
+}
+
 /*
  * Attempt to hand out a delegation.
  */
@@ -2604,10 +2649,11 @@
 {
 	struct nfs4_delegation *dp;
 	struct nfs4_stateowner *sop = stp->st_stateowner;
-	int cb_up = atomic_read(&sop->so_client->cl_cb_set);
+	int cb_up;
 	struct file_lock *fl;
 	int status, flag = 0;
 
+	cb_up = nfsd4_cb_channel_good(sop->so_client);
 	flag = NFS4_OPEN_DELEGATE_NONE;
 	open->op_recall = 0;
 	switch (open->op_claim_type) {
@@ -2655,7 +2701,7 @@
 	dp->dl_flock = fl;
 
 	/* vfs_setlease checks to see if delegation should be handed out.
-	 * the lock_manager callbacks fl_mylease and fl_change are used
+	 * the lock_manager callback fl_change is used
 	 */
 	if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
 		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
@@ -2794,7 +2840,7 @@
 	renew_client(clp);
 	status = nfserr_cb_path_down;
 	if (!list_empty(&clp->cl_delegations)
-			&& !atomic_read(&clp->cl_cb_set))
+			&& clp->cl_cb_state != NFSD4_CB_UP)
 		goto out;
 	status = nfs_ok;
 out:
@@ -3081,9 +3127,10 @@
 		if (status)
 			goto out;
 		renew_client(dp->dl_client);
-		if (filpp)
+		if (filpp) {
 			*filpp = find_readable_file(dp->dl_file);
-		BUG_ON(!*filpp);
+			BUG_ON(!*filpp);
+		}
 	} else { /* open or lock stateid */
 		stp = find_stateid(stateid, flags);
 		if (!stp)
@@ -4107,7 +4154,7 @@
 	unsigned int strhashval = clientstr_hashval(name);
 	struct nfs4_client *clp;
 
-	clp = find_confirmed_client_by_str(name, strhashval, use_exchange_id);
+	clp = find_confirmed_client_by_str(name, strhashval);
 	return clp ? 1 : 0;
 }
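
nfsd4_map_bcts_dir(), added above, canonicalises the BIND_CONN_TO_SESSION direction argument before the connection is hashed. A worked example of its contract (sketch only; the NFS4_CDFC4_* constants are the ones used in the hunks above):

    static void example_map_bcts_dir(void)
    {
    	u32 dir = NFS4_CDFC4_FORE_OR_BOTH;
    	__be32 status = nfsd4_map_bcts_dir(&dir);
    	/* status == nfs_ok, dir rewritten to NFS4_CDFC4_BOTH */

    	dir = 0;			/* not a defined direction */
    	status = nfsd4_map_bcts_dir(&dir);
    	/* status == nfserr_inval, dir left unchanged */
    	(void)status;
    }
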
 
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index f35a94a..956629b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -44,13 +44,14 @@
 #include <linux/namei.h>
 #include <linux/statfs.h>
 #include <linux/utsname.h>
-#include <linux/nfsd_idmap.h>
-#include <linux/nfs4_acl.h>
 #include <linux/sunrpc/svcauth_gss.h>
 
+#include "idmap.h"
+#include "acl.h"
 #include "xdr4.h"
 #include "vfs.h"
 
+
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
 /*
@@ -288,17 +289,17 @@
 			len += XDR_QUADLEN(dummy32) << 2;
 			READMEM(buf, dummy32);
 			ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
-			host_err = 0;
+			status = nfs_ok;
 			if (ace->whotype != NFS4_ACL_WHO_NAMED)
 				ace->who = 0;
 			else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
-				host_err = nfsd_map_name_to_gid(argp->rqstp,
+				status = nfsd_map_name_to_gid(argp->rqstp,
 						buf, dummy32, &ace->who);
 			else
-				host_err = nfsd_map_name_to_uid(argp->rqstp,
+				status = nfsd_map_name_to_uid(argp->rqstp,
 						buf, dummy32, &ace->who);
-			if (host_err)
-				goto out_nfserr;
+			if (status)
+				return status;
 		}
 	} else
 		*acl = NULL;
@@ -420,6 +421,21 @@
 	DECODE_TAIL;
 }
 
+static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
+{
+	DECODE_HEAD;
+	u32 dummy;
+
+	READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
+	COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
+	READ32(bcts->dir);
+	/* XXX: Perhaps Tom Tucker could help us figure out how we
+	 * should be using ctsa_use_conn_in_rdma_mode: */
+	READ32(dummy);
+
+	DECODE_TAIL;
+}
+
 static __be32
 nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
 {
@@ -847,6 +863,17 @@
 }
 
 static __be32
+nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
+		     struct nfsd4_secinfo_no_name *sin)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(sin->sin_style);
+	DECODE_TAIL;
+}
+
+static __be32
 nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
 {
 	__be32 status;
@@ -1005,7 +1032,7 @@
 nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
 			 struct nfsd4_exchange_id *exid)
 {
-	int dummy;
+	int dummy, tmp;
 	DECODE_HEAD;
 
 	READ_BUF(NFS4_VERIFIER_SIZE);
@@ -1053,15 +1080,23 @@
 
 		/* ssp_hash_algs<> */
 		READ_BUF(4);
-		READ32(dummy);
-		READ_BUF(dummy);
-		p += XDR_QUADLEN(dummy);
+		READ32(tmp);
+		while (tmp--) {
+			READ_BUF(4);
+			READ32(dummy);
+			READ_BUF(dummy);
+			p += XDR_QUADLEN(dummy);
+		}
 
 		/* ssp_encr_algs<> */
 		READ_BUF(4);
-		READ32(dummy);
-		READ_BUF(dummy);
-		p += XDR_QUADLEN(dummy);
+		READ32(tmp);
+		while (tmp--) {
+			READ_BUF(4);
+			READ32(dummy);
+			READ_BUF(dummy);
+			p += XDR_QUADLEN(dummy);
+		}
 
 		/* ssp_window and ssp_num_gss_handles */
 		READ_BUF(8);
@@ -1339,7 +1374,7 @@
 
 	/* new operations for NFSv4.1 */
 	[OP_BACKCHANNEL_CTL]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
 	[OP_EXCHANGE_ID]	= (nfsd4_dec)nfsd4_decode_exchange_id,
 	[OP_CREATE_SESSION]	= (nfsd4_dec)nfsd4_decode_create_session,
 	[OP_DESTROY_SESSION]	= (nfsd4_dec)nfsd4_decode_destroy_session,
@@ -1350,7 +1385,7 @@
 	[OP_LAYOUTCOMMIT]	= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_LAYOUTGET]		= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_LAYOUTRETURN]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_secinfo_no_name,
 	[OP_SEQUENCE]		= (nfsd4_dec)nfsd4_decode_sequence,
 	[OP_SET_SSV]		= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_TEST_STATEID]	= (nfsd4_dec)nfsd4_decode_notsupp,
@@ -2309,8 +2344,6 @@
 	case nfserr_resource:
 		nfserr = nfserr_toosmall;
 		goto fail;
-	case nfserr_dropit:
-		goto fail;
 	case nfserr_noent:
 		goto skip_entry;
 	default:
@@ -2365,6 +2398,21 @@
 	return nfserr;
 }
 
+static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
+{
+	__be32 *p;
+
+	if (!nfserr) {
+		RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8);
+		WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
+		WRITE32(bcts->dir);
+		/* XXX: ? */
+		WRITE32(0);
+		ADJUST_ARGS();
+	}
+	return nfserr;
+}
+
 static __be32
 nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
 {
@@ -2826,11 +2874,10 @@
 }
 
 static __be32
-nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
-		     struct nfsd4_secinfo *secinfo)
+nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
+			 __be32 nfserr, struct svc_export *exp)
 {
 	int i = 0;
-	struct svc_export *exp = secinfo->si_exp;
 	u32 nflavs;
 	struct exp_flavor_info *flavs;
 	struct exp_flavor_info def_flavs[2];
@@ -2892,6 +2939,20 @@
 	return nfserr;
 }
 
+static __be32
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+		     struct nfsd4_secinfo *secinfo)
+{
+	return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->si_exp);
+}
+
+static __be32
+nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
+		     struct nfsd4_secinfo_no_name *secinfo)
+{
+	return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->sin_exp);
+}
+
 /*
  * The SETATTR encode routine is special -- it always encodes a bitmap,
  * regardless of the error status.
@@ -3076,13 +3137,9 @@
 	WRITE32(seq->seqid);
 	WRITE32(seq->slotid);
 	WRITE32(seq->maxslots);
-	/*
-	 * FIXME: for now:
-	 *   target_maxslots = maxslots
-	 *   status_flags = 0
-	 */
+	/* For now: target_maxslots = maxslots */
 	WRITE32(seq->maxslots);
-	WRITE32(0);
+	WRITE32(seq->status_flags);
 
 	ADJUST_ARGS();
 	resp->cstate.datap = p; /* DRC cache data pointer */
@@ -3143,7 +3200,7 @@
 
 	/* NFSv4.1 operations */
 	[OP_BACKCHANNEL_CTL]	= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
+	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
 	[OP_EXCHANGE_ID]	= (nfsd4_enc)nfsd4_encode_exchange_id,
 	[OP_CREATE_SESSION]	= (nfsd4_enc)nfsd4_encode_create_session,
 	[OP_DESTROY_SESSION]	= (nfsd4_enc)nfsd4_encode_destroy_session,
@@ -3154,7 +3211,7 @@
 	[OP_LAYOUTCOMMIT]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_LAYOUTGET]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_LAYOUTRETURN]	= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_SECINFO_NO_NAME]	= (nfsd4_enc)nfsd4_encode_noop,
+	[OP_SECINFO_NO_NAME]	= (nfsd4_enc)nfsd4_encode_secinfo_no_name,
 	[OP_SEQUENCE]		= (nfsd4_enc)nfsd4_encode_sequence,
 	[OP_SET_SSV]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_TEST_STATEID]	= (nfsd4_enc)nfsd4_encode_noop,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 4514ebb..33b3e2b 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -8,12 +8,12 @@
 #include <linux/namei.h>
 #include <linux/ctype.h>
 
-#include <linux/nfsd_idmap.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/nfsd/syscall.h>
 #include <linux/lockd/lockd.h>
 #include <linux/sunrpc/clnt.h>
 
+#include "idmap.h"
 #include "nfsd.h"
 #include "cache.h"
 
@@ -127,6 +127,7 @@
 
 static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
 {
+#ifdef CONFIG_NFSD_DEPRECATED
 	static int warned;
 	if (file->f_dentry->d_name.name[0] == '.' && !warned) {
 		printk(KERN_INFO
@@ -135,6 +136,7 @@
 		       current->comm, file->f_dentry->d_name.name);
 		warned = 1;
 	}
+#endif
 	if (! file->private_data) {
 		/* An attempt to read a transaction file without writing
 		 * causes a 0-byte write so that the file can return
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 6b641cf..7ecfa24 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -158,6 +158,7 @@
 #define	nfserr_attrnotsupp	cpu_to_be32(NFSERR_ATTRNOTSUPP)
 #define	nfserr_bad_xdr		cpu_to_be32(NFSERR_BAD_XDR)
 #define	nfserr_openmode		cpu_to_be32(NFSERR_OPENMODE)
+#define	nfserr_badowner		cpu_to_be32(NFSERR_BADOWNER)
 #define	nfserr_locks_held	cpu_to_be32(NFSERR_LOCKS_HELD)
 #define	nfserr_op_illegal	cpu_to_be32(NFSERR_OP_ILLEGAL)
 #define	nfserr_grace		cpu_to_be32(NFSERR_GRACE)
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 08e1726..e15dc45 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -735,9 +735,9 @@
 		{ nfserr_stale, -ESTALE },
 		{ nfserr_jukebox, -ETIMEDOUT },
 		{ nfserr_jukebox, -ERESTARTSYS },
-		{ nfserr_dropit, -EAGAIN },
-		{ nfserr_dropit, -ENOMEM },
-		{ nfserr_badname, -ESRCH },
+		{ nfserr_jukebox, -EAGAIN },
+		{ nfserr_jukebox, -EWOULDBLOCK },
+		{ nfserr_jukebox, -ENOMEM },
 		{ nfserr_io, -ETXTBSY },
 		{ nfserr_notsupp, -EOPNOTSUPP },
 		{ nfserr_toosmall, -ETOOSMALL },
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 2bae1d8..18743c4 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -608,7 +608,7 @@
 	/* Now call the procedure handler, and encode NFS status. */
 	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 	nfserr = map_new_errors(rqstp->rq_vers, nfserr);
-	if (nfserr == nfserr_dropit) {
+	if (nfserr == nfserr_dropit || rqstp->rq_dropme) {
 		dprintk("nfsd: Dropping request; may be revisited later\n");
 		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
 		return 0;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 39adc27..3074656 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -68,10 +68,12 @@
 struct nfsd4_callback {
 	void *cb_op;
 	struct nfs4_client *cb_clp;
+	struct list_head cb_per_client;
 	u32 cb_minorversion;
 	struct rpc_message cb_msg;
 	const struct rpc_call_ops *cb_ops;
 	struct work_struct cb_work;
+	bool cb_done;
 };
 
 struct nfs4_delegation {
@@ -81,6 +83,7 @@
 	atomic_t		dl_count;       /* ref count */
 	struct nfs4_client	*dl_client;
 	struct nfs4_file	*dl_file;
+	struct file		*dl_vfs_file;
 	struct file_lock	*dl_flock;
 	u32			dl_type;
 	time_t			dl_time;
@@ -95,6 +98,7 @@
 struct nfs4_cb_conn {
 	/* SETCLIENTID info */
 	struct sockaddr_storage	cb_addr;
+	struct sockaddr_storage	cb_saddr;
 	size_t			cb_addrlen;
 	u32                     cb_prog; /* used only in 4.0 case;
 					    per-session otherwise */
@@ -146,6 +150,11 @@
 	u32				gid;
 };
 
+struct nfsd4_bind_conn_to_session {
+	struct nfs4_sessionid		sessionid;
+	u32				dir;
+};
+
 /* The single slot clientid cache structure */
 struct nfsd4_clid_slot {
 	u32				sl_seqid;
@@ -235,9 +244,13 @@
 	unsigned long		cl_cb_flags;
 	struct rpc_clnt		*cl_cb_client;
 	u32			cl_cb_ident;
-	atomic_t		cl_cb_set;
+#define NFSD4_CB_UP		0
+#define NFSD4_CB_UNKNOWN	1
+#define NFSD4_CB_DOWN		2
+	int			cl_cb_state;
 	struct nfsd4_callback	cl_cb_null;
 	struct nfsd4_session	*cl_cb_session;
+	struct list_head	cl_callbacks; /* list of in-progress callbacks */
 
 	/* for all client information that callback code might need: */
 	spinlock_t		cl_lock;
@@ -454,6 +467,7 @@
 extern void nfs4_free_stateowner(struct kref *kref);
 extern int set_callback_cred(void);
 extern void nfsd4_probe_callback(struct nfs4_client *clp);
+extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
 extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
 extern void nfsd4_do_callback_rpc(struct work_struct *);
 extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 230b79f..641117f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1,4 +1,3 @@
-#define MSNFS	/* HACK HACK */
 /*
  * File operations used by nfsd. Some of these have been ripped from
  * other parts of the kernel because they weren't exported, others
@@ -35,8 +34,8 @@
 #endif /* CONFIG_NFSD_V3 */
 
 #ifdef CONFIG_NFSD_V4
-#include <linux/nfs4_acl.h>
-#include <linux/nfsd_idmap.h>
+#include "acl.h"
+#include "idmap.h"
 #endif /* CONFIG_NFSD_V4 */
 
 #include "nfsd.h"
@@ -88,8 +87,9 @@
 			    .dentry = dget(dentry)};
 	int err = 0;
 
-	while (d_mountpoint(path.dentry) && follow_down(&path))
-		;
+	err = follow_down(&path, false);
+	if (err < 0)
+		goto out;
 
 	exp2 = rqst_exp_get_by_name(rqstp, &path);
 	if (IS_ERR(exp2)) {
@@ -273,6 +273,13 @@
 	return err;
 }
 
+static int nfsd_break_lease(struct inode *inode)
+{
+	if (!S_ISREG(inode->i_mode))
+		return 0;
+	return break_lease(inode, O_WRONLY | O_NONBLOCK);
+}
+
 /*
  * Commit metadata changes to stable storage.
  */
@@ -375,16 +382,6 @@
 				goto out;
 		}
 
-		/*
-		 * If we are changing the size of the file, then
-		 * we need to break all leases.
-		 */
-		host_err = break_lease(inode, O_WRONLY | O_NONBLOCK);
-		if (host_err == -EWOULDBLOCK)
-			host_err = -ETIMEDOUT;
-		if (host_err) /* ENOMEM or EWOULDBLOCK */
-			goto out_nfserr;
-
 		host_err = get_write_access(inode);
 		if (host_err)
 			goto out_nfserr;
@@ -425,7 +422,11 @@
 
 	err = nfserr_notsync;
 	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+		host_err = nfsd_break_lease(inode);
+		if (host_err)
+			goto out_nfserr;
 		fh_lock(fhp);
+
 		host_err = notify_change(dentry, iap);
 		err = nfserrno(host_err);
 		fh_unlock(fhp);
@@ -752,8 +753,6 @@
 	 */
 	if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
 		host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
-	if (host_err == -EWOULDBLOCK)
-		host_err = -ETIMEDOUT;
 	if (host_err) /* NOMEM or WOULDBLOCK */
 		goto out_nfserr;
 
@@ -874,15 +873,6 @@
 	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
 }
 
-static inline int svc_msnfs(struct svc_fh *ffhp)
-{
-#ifdef MSNFS
-	return (ffhp->fh_export->ex_flags & NFSEXP_MSNFS);
-#else
-	return 0;
-#endif
-}
-
 static __be32
 nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
               loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
@@ -895,9 +885,6 @@
 	err = nfserr_perm;
 	inode = file->f_path.dentry->d_inode;
 
-	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
-		goto out;
-
 	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
 		struct splice_desc sd = {
 			.len		= 0,
@@ -922,7 +909,6 @@
 		fsnotify_access(file);
 	} else 
 		err = nfserrno(host_err);
-out:
 	return err;
 }
 
@@ -987,14 +973,6 @@
 	int			stable = *stablep;
 	int			use_wgather;
 
-#ifdef MSNFS
-	err = nfserr_perm;
-
-	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-		(!lock_may_write(file->f_path.dentry->d_inode, offset, *cnt)))
-		goto out;
-#endif
-
 	dentry = file->f_path.dentry;
 	inode = dentry->d_inode;
 	exp   = fhp->fh_export;
@@ -1045,7 +1023,6 @@
 		err = 0;
 	else
 		err = nfserrno(host_err);
-out:
 	return err;
 }
 
@@ -1665,6 +1642,12 @@
 		err = nfserrno(host_err);
 		goto out_dput;
 	}
+	err = nfserr_noent;
+	if (!dold->d_inode)
+		goto out_drop_write;
+	host_err = nfsd_break_lease(dold->d_inode);
+	if (host_err)
+		goto out_drop_write;
 	host_err = vfs_link(dold, dirp, dnew);
 	if (!host_err) {
 		err = nfserrno(commit_metadata(ffhp));
@@ -1676,6 +1659,7 @@
 		else
 			err = nfserrno(host_err);
 	}
+out_drop_write:
 	mnt_drop_write(tfhp->fh_export->ex_path.mnt);
 out_dput:
 	dput(dnew);
@@ -1750,12 +1734,6 @@
 	if (ndentry == trap)
 		goto out_dput_new;
 
-	if (svc_msnfs(ffhp) &&
-		((odentry->d_count > 1) || (ndentry->d_count > 1))) {
-			host_err = -EPERM;
-			goto out_dput_new;
-	}
-
 	host_err = -EXDEV;
 	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
 		goto out_dput_new;
@@ -1763,15 +1741,17 @@
 	if (host_err)
 		goto out_dput_new;
 
+	host_err = nfsd_break_lease(odentry->d_inode);
+	if (host_err)
+		goto out_drop_write;
 	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
 	if (!host_err) {
 		host_err = commit_metadata(tfhp);
 		if (!host_err)
 			host_err = commit_metadata(ffhp);
 	}
-
+out_drop_write:
 	mnt_drop_write(ffhp->fh_export->ex_path.mnt);
-
  out_dput_new:
 	dput(ndentry);
  out_dput_old:
@@ -1834,18 +1814,14 @@
 	if (host_err)
 		goto out_nfserr;
 
-	if (type != S_IFDIR) { /* It's UNLINK */
-#ifdef MSNFS
-		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-			(rdentry->d_count > 1)) {
-			host_err = -EPERM;
-		} else
-#endif
+	host_err = nfsd_break_lease(rdentry->d_inode);
+	if (host_err)
+		goto out_put;
+	if (type != S_IFDIR)
 		host_err = vfs_unlink(dirp, rdentry);
-	} else { /* It's RMDIR */
+	else
 		host_err = vfs_rmdir(dirp, rdentry);
-	}
-
+out_put:
 	dput(rdentry);
 
 	if (!host_err)
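
Several hunks above converge on one pattern: call the new nfsd_break_lease() helper before a metadata-changing VFS operation, and let any error (including -EWOULDBLOCK from a contended lease, which nfsproc.c now maps to nfserr_jukebox) abort the operation. A compact invented caller showing the shape of that pattern:

    static __be32 example_unlink(struct inode *dir, struct dentry *dentry)
    {
    	int host_err;

    	host_err = nfsd_break_lease(dentry->d_inode);
    	if (!host_err)
    		host_err = vfs_unlink(dir, dentry);
    	return nfserrno(host_err);	/* nfs_ok when host_err == 0 */
    }
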
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 60fce3d..366401e 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -311,6 +311,11 @@
 	struct svc_export *si_exp;			/* response */
 };
 
+struct nfsd4_secinfo_no_name {
+	u32 sin_style;					/* request */
+	struct svc_export *sin_exp;			/* response */
+};
+
 struct nfsd4_setattr {
 	stateid_t	sa_stateid;         /* request */
 	u32		sa_bmval[3];        /* request */
@@ -373,8 +378,8 @@
 	u32			cachethis;		/* request */
 #if 0
 	u32			target_maxslots;	/* response */
-	u32			status_flags;		/* response */
 #endif /* not yet */
+	u32			status_flags;		/* response */
 };
 
 struct nfsd4_destroy_session {
@@ -422,6 +427,7 @@
 
 		/* NFSv4.1 */
 		struct nfsd4_exchange_id	exchange_id;
+		struct nfsd4_bind_conn_to_session bind_conn_to_session;
 		struct nfsd4_create_session	create_session;
 		struct nfsd4_destroy_session	destroy_session;
 		struct nfsd4_sequence		sequence;
@@ -518,6 +524,7 @@
 		struct nfsd4_sequence *seq);
 extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
 		struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
+extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *);
 extern __be32 nfsd4_create_session(struct svc_rqst *,
 		struct nfsd4_compound_state *,
 		struct nfsd4_create_session *);
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index ab152c0..77a8de5 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -1,7 +1,6 @@
 config OCFS2_FS
 	tristate "OCFS2 file system support"
-	depends on NET && SYSFS
-	select CONFIGFS_FS
+	depends on NET && SYSFS && CONFIGFS_FS
 	select JBD2
 	select CRC32
 	select QUOTA
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 63e3fca..a665195 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1989,20 +1989,20 @@
 	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
 }
 
-static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
+static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
 			    loff_t len)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_space_resv sr;
 	int change_size = 1;
 	int cmd = OCFS2_IOC_RESVSP64;
 
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
 	if (!ocfs2_writes_unwritten_extents(osb))
 		return -EOPNOTSUPP;
 
-	if (S_ISDIR(inode->i_mode))
-		return -ENODEV;
-
 	if (mode & FALLOC_FL_KEEP_SIZE)
 		change_size = 0;
 
@@ -2610,7 +2610,6 @@
 	.getxattr	= generic_getxattr,
 	.listxattr	= ocfs2_listxattr,
 	.removexattr	= generic_removexattr,
-	.fallocate	= ocfs2_fallocate,
 	.fiemap		= ocfs2_fiemap,
 };
 
@@ -2642,6 +2641,7 @@
 	.flock		= ocfs2_flock,
 	.splice_read	= ocfs2_file_splice_read,
 	.splice_write	= ocfs2_file_splice_write,
+	.fallocate	= ocfs2_fallocate,
 };
 
 const struct file_operations ocfs2_dops = {
diff --git a/fs/open.c b/fs/open.c
index 5b6ef7e..e52389e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -255,10 +255,10 @@
 	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
 		return -EFBIG;
 
-	if (!inode->i_op->fallocate)
+	if (!file->f_op->fallocate)
 		return -EOPNOTSUPP;
 
-	return inode->i_op->fallocate(inode, mode, offset, len);
+	return file->f_op->fallocate(file, mode, offset, len);
 }
 
 SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
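
With do_fallocate() now dispatching through file_operations, a filesystem wires the hook up roughly like this (the examplefs_* names and the examplefs_reserve() helper are invented; the signature matches the new ->fallocate used above):

    static long examplefs_fallocate(struct file *file, int mode,
    				loff_t offset, loff_t len)
    {
    	struct inode *inode = file->f_path.dentry->d_inode;

    	if (mode & ~FALLOC_FL_KEEP_SIZE)
    		return -EOPNOTSUPP;
    	/* hypothetical allocator; preserve i_size if KEEP_SIZE was set */
    	return examplefs_reserve(inode, offset, len,
    				 !(mode & FALLOC_FL_KEEP_SIZE));
    }

    static const struct file_operations examplefs_fops = {
    	/* ...read/write/mmap as usual... */
    	.fallocate	= examplefs_fallocate,
    };
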
diff --git a/fs/pipe.c b/fs/pipe.c
index e2e95fb..89e9e19 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1292,7 +1292,7 @@
 static void __exit exit_pipe_fs(void)
 {
 	unregister_filesystem(&pipe_fs_type);
-	mntput_long(pipe_mnt);
+	mntput(pipe_mnt);
 }
 
 fs_initcall(init_pipe_fs);
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index e5f63da..aa68a8a 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -29,7 +29,6 @@
 config SQUASHFS_XATTR
 	bool "Squashfs XATTR support"
 	depends on SQUASHFS
-	default n
 	help
 	  Saying Y here includes support for extended attributes (xattrs).
 	  Xattrs are name:value pairs associated with inodes by
@@ -40,7 +39,6 @@
 config SQUASHFS_LZO
 	bool "Include support for LZO compressed file systems"
 	depends on SQUASHFS
-	default n
 	select LZO_DECOMPRESS
 	help
 	  Saying Y here includes support for reading Squashfs file systems
@@ -53,10 +51,24 @@
 
 	  If unsure, say N.
 
+config SQUASHFS_XZ
+	bool "Include support for XZ compressed file systems"
+	depends on SQUASHFS
+	select XZ_DEC
+	help
+	  Saying Y here includes support for reading Squashfs file systems
+	  compressed with XZ compression.  XZ gives better compression than
+	  the default zlib compression, at the expense of greater CPU and
+	  memory overhead.
+
+	  XZ is not the standard compression used in Squashfs and so most
+	  file systems will be readable without selecting this option.
+
+	  If unsure, say N.
+
 config SQUASHFS_EMBEDDED
 	bool "Additional option for memory-constrained systems"
 	depends on SQUASHFS
-	default n
 	help
 	  Saying Y here allows you to specify cache size.
 
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 7672bac..cecf2be 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -7,3 +7,4 @@
 squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o
 squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
 squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 653c030..2fb2882 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -34,7 +34,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "decompressor.h"
 
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 57314be..26b15ae 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -55,7 +55,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 
 /*
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index 24af9ce..a5940e5 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -27,7 +27,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "decompressor.h"
 #include "squashfs.h"
 
@@ -41,23 +40,26 @@
 };
 
 #ifndef CONFIG_SQUASHFS_LZO
-static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = {
+static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
 	NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
 };
 #endif
 
+#ifndef CONFIG_SQUASHFS_XZ
+static const struct squashfs_decompressor squashfs_xz_comp_ops = {
+	NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
+};
+#endif
+
 static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
 	NULL, NULL, NULL, 0, "unknown", 0
 };
 
 static const struct squashfs_decompressor *decompressor[] = {
 	&squashfs_zlib_comp_ops,
-	&squashfs_lzma_unsupported_comp_ops,
-#ifdef CONFIG_SQUASHFS_LZO
 	&squashfs_lzo_comp_ops,
-#else
-	&squashfs_lzo_unsupported_comp_ops,
-#endif
+	&squashfs_xz_comp_ops,
+	&squashfs_lzma_unsupported_comp_ops,
 	&squashfs_unknown_comp_ops
 };
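Note: the table above is scanned by id, with the zero-id "unknown" entry
acting as a sentinel; unsupported algorithms keep a named stub so a mount
of such an image can still report the compression type. A sketch of the
lookup this table enables, assuming the upstream helper shape (function
name illustrative):

	static const struct squashfs_decompressor *lookup_decompressor(int id)
	{
		int i;

		/* Stop at a matching id or at the sentinel (id == 0). */
		for (i = 0; decompressor[i]->id; i++)
			if (id == decompressor[i]->id)
				break;

		return decompressor[i];
	}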
 
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 7425f80..3b305a7 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -52,4 +52,13 @@
 	return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
 		length, srclength, pages);
 }
+
+#ifdef CONFIG_SQUASHFS_XZ
+extern const struct squashfs_decompressor squashfs_xz_comp_ops;
+#endif
+
+#ifdef CONFIG_SQUASHFS_LZO
+extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
+#endif
+
 #endif
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 7c90bbd..7eef571 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -39,7 +39,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 
 /*
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index b7f64bc..d8f3245 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -37,7 +37,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 
 /*
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 5d87789..7da759e 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -29,7 +29,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "decompressor.h"
 
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 5d45569..ba729d8 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -27,11 +27,6 @@
 
 #define WARNING(s, args...)	pr_warning("SQUASHFS: "s, ## args)
 
-static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
-{
-	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
-}
-
 /* block.c */
 extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
 				int, int);
@@ -104,6 +99,3 @@
 
 /* zlib_wrapper.c */
 extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
-
-/* lzo_wrapper.c */
-extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index c5137fc..39533fe 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -238,6 +238,7 @@
 #define ZLIB_COMPRESSION	1
 #define LZMA_COMPRESSION	2
 #define LZO_COMPRESSION		3
+#define XZ_COMPRESSION		4
 
 struct squashfs_super_block {
 	__le32			s_magic;
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index d3e3a37..359baef 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -45,4 +45,10 @@
 	};
 	struct inode	vfs_inode;
 };
+
+
+static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
+{
+	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
+}
 #endif
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
index d33be5d..05385db 100644
--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -32,7 +32,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "xattr.h"
 
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
new file mode 100644
index 0000000..856756c
--- /dev/null
+++ b/fs/squashfs/xz_wrapper.c
@@ -0,0 +1,153 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * xz_wrapper.c
+ */
+
+
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/xz.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "decompressor.h"
+
+struct squashfs_xz {
+	struct xz_dec *state;
+	struct xz_buf buf;
+};
+
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk)
+{
+	int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+
+	struct squashfs_xz *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+	if (stream == NULL)
+		goto failed;
+
+	stream->state = xz_dec_init(XZ_PREALLOC, block_size);
+	if (stream->state == NULL)
+		goto failed;
+
+	return stream;
+
+failed:
+	ERROR("Failed to allocate xz workspace\n");
+	kfree(stream);
+	return NULL;
+}
+
+
+static void squashfs_xz_free(void *strm)
+{
+	struct squashfs_xz *stream = strm;
+
+	if (stream) {
+		xz_dec_end(stream->state);
+		kfree(stream);
+	}
+}
+
+
+static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
+	struct buffer_head **bh, int b, int offset, int length, int srclength,
+	int pages)
+{
+	enum xz_ret xz_err;
+	int avail, total = 0, k = 0, page = 0;
+	struct squashfs_xz *stream = msblk->stream;
+
+	mutex_lock(&msblk->read_data_mutex);
+
+	xz_dec_reset(stream->state);
+	stream->buf.in_pos = 0;
+	stream->buf.in_size = 0;
+	stream->buf.out_pos = 0;
+	stream->buf.out_size = PAGE_CACHE_SIZE;
+	stream->buf.out = buffer[page++];
+
+	do {
+		if (stream->buf.in_pos == stream->buf.in_size && k < b) {
+			avail = min(length, msblk->devblksize - offset);
+			length -= avail;
+			wait_on_buffer(bh[k]);
+			if (!buffer_uptodate(bh[k]))
+				goto release_mutex;
+
+			if (avail == 0) {
+				offset = 0;
+				put_bh(bh[k++]);
+				continue;
+			}
+
+			stream->buf.in = bh[k]->b_data + offset;
+			stream->buf.in_size = avail;
+			stream->buf.in_pos = 0;
+			offset = 0;
+		}
+
+		if (stream->buf.out_pos == stream->buf.out_size
+							&& page < pages) {
+			stream->buf.out = buffer[page++];
+			stream->buf.out_pos = 0;
+			total += PAGE_CACHE_SIZE;
+		}
+
+		xz_err = xz_dec_run(stream->state, &stream->buf);
+
+		if (stream->buf.in_pos == stream->buf.in_size && k < b)
+			put_bh(bh[k++]);
+	} while (xz_err == XZ_OK);
+
+	if (xz_err != XZ_STREAM_END) {
+		ERROR("xz_dec_run error, data probably corrupt\n");
+		goto release_mutex;
+	}
+
+	if (k < b) {
+		ERROR("xz_uncompress error, input remaining\n");
+		goto release_mutex;
+	}
+
+	total += stream->buf.out_pos;
+	mutex_unlock(&msblk->read_data_mutex);
+	return total;
+
+release_mutex:
+	mutex_unlock(&msblk->read_data_mutex);
+
+	for (; k < b; k++)
+		put_bh(bh[k]);
+
+	return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_xz_comp_ops = {
+	.init = squashfs_xz_init,
+	.free = squashfs_xz_free,
+	.decompress = squashfs_xz_uncompress,
+	.id = XZ_COMPRESSION,
+	.name = "xz",
+	.supported = 1
+};
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 7a60387..818a5e0 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -29,7 +29,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "decompressor.h"
 
@@ -66,8 +65,8 @@
 	struct buffer_head **bh, int b, int offset, int length, int srclength,
 	int pages)
 {
-	int zlib_err = 0, zlib_init = 0;
-	int avail, bytes, k = 0, page = 0;
+	int zlib_err, zlib_init = 0;
+	int k = 0, page = 0;
 	z_stream *stream = msblk->stream;
 
 	mutex_lock(&msblk->read_data_mutex);
@@ -75,11 +74,10 @@
 	stream->avail_out = 0;
 	stream->avail_in = 0;
 
-	bytes = length;
 	do {
 		if (stream->avail_in == 0 && k < b) {
-			avail = min(bytes, msblk->devblksize - offset);
-			bytes -= avail;
+			int avail = min(length, msblk->devblksize - offset);
+			length -= avail;
 			wait_on_buffer(bh[k]);
 			if (!buffer_uptodate(bh[k]))
 				goto release_mutex;
@@ -128,6 +126,11 @@
 		goto release_mutex;
 	}
 
+	if (k < b) {
+		ERROR("zlib_uncompress error, data remaining\n");
+		goto release_mutex;
+	}
+
 	length = stream->total_out;
 	mutex_unlock(&msblk->read_data_mutex);
 	return length;
diff --git a/fs/stat.c b/fs/stat.c
index 12e90e2..d5c61cf 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -75,11 +75,13 @@
 	int error = -EINVAL;
 	int lookup_flags = 0;
 
-	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+	if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT)) != 0)
 		goto out;
 
 	if (!(flag & AT_SYMLINK_NOFOLLOW))
 		lookup_flags |= LOOKUP_FOLLOW;
+	if (flag & AT_NO_AUTOMOUNT)
+		lookup_flags |= LOOKUP_NO_AUTOMOUNT;
 
 	error = user_path_at(dfd, filename, lookup_flags, &path);
 	if (error)
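Note: userspace reaches the new LOOKUP_NO_AUTOMOUNT behaviour through the
AT_NO_AUTOMOUNT flag to fstatat(2). A minimal sketch, assuming an automount
point under /net (path illustrative):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct stat st;

		/* Stat a (possible) automount point without triggering
		 * the automount. */
		if (fstatat(AT_FDCWD, "/net/somehost", &st,
			    AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW) < 0) {
			perror("fstatat");
			return 1;
		}
		printf("inode %llu\n", (unsigned long long)st.st_ino);
		return 0;
	}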
diff --git a/fs/super.c b/fs/super.c
index 4f6a357..74e149e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1141,7 +1141,7 @@
 	return mnt;
 
  err:
-	mntput_long(mnt);
+	mntput(mnt);
 	return ERR_PTR(err);
 }
 
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0dce969..faca449 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -98,6 +98,7 @@
 				   kmem.o \
 				   xfs_aops.o \
 				   xfs_buf.o \
+				   xfs_discard.o \
 				   xfs_export.o \
 				   xfs_file.o \
 				   xfs_fs_subr.o \
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 92f1f2a..ac1c7e8 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -896,7 +896,6 @@
 	trace_xfs_buf_rele(bp, _RET_IP_);
 
 	if (!pag) {
-		ASSERT(!bp->b_relse);
 		ASSERT(list_empty(&bp->b_lru));
 		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 		if (atomic_dec_and_test(&bp->b_hold))
@@ -908,11 +907,7 @@
 
 	ASSERT(atomic_read(&bp->b_hold) > 0);
 	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
-		if (bp->b_relse) {
-			atomic_inc(&bp->b_hold);
-			spin_unlock(&pag->pag_buf_lock);
-			bp->b_relse(bp);
-		} else if (!(bp->b_flags & XBF_STALE) &&
+		if (!(bp->b_flags & XBF_STALE) &&
 			   atomic_read(&bp->b_lru_ref)) {
 			xfs_buf_lru_add(bp);
 			spin_unlock(&pag->pag_buf_lock);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index a76c242..cbe6595 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -152,8 +152,6 @@
 
 struct xfs_buf;
 typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
-typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
-typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
 
 #define XB_PAGES	2
 
@@ -183,7 +181,6 @@
 	void			*b_addr;	/* virtual address of buffer */
 	struct work_struct	b_iodone_work;
 	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
-	xfs_buf_relse_t		b_relse;	/* releasing function */
 	struct completion	b_iowait;	/* queue for I/O waiters */
 	void			*b_fspriv;
 	void			*b_fspriv2;
@@ -323,7 +320,6 @@
 #define XFS_BUF_FSPRIVATE2(bp, type)		((type)(bp)->b_fspriv2)
 #define XFS_BUF_SET_FSPRIVATE2(bp, val)		((bp)->b_fspriv2 = (void*)(val))
 #define XFS_BUF_SET_START(bp)			do { } while (0)
-#define XFS_BUF_SET_BRELSE_FUNC(bp, func)	((bp)->b_relse = (func))
 
 #define XFS_BUF_PTR(bp)			(xfs_caddr_t)((bp)->b_addr)
 #define XFS_BUF_SET_PTR(bp, val, cnt)	xfs_buf_associate_memory(bp, val, cnt)
@@ -360,8 +356,7 @@
 
 static inline void xfs_buf_relse(xfs_buf_t *bp)
 {
-	if (!bp->b_relse)
-		xfs_buf_unlock(bp);
+	xfs_buf_unlock(bp);
 	xfs_buf_rele(bp);
 }
 
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
new file mode 100644
index 0000000..05201ae
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_discard.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_discard.h"
+#include "xfs_trace.h"
+
+STATIC int
+xfs_trim_extents(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_fsblock_t		start,
+	xfs_fsblock_t		len,
+	xfs_fsblock_t		minlen,
+	__uint64_t		*blocks_trimmed)
+{
+	struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
+	struct xfs_btree_cur	*cur;
+	struct xfs_buf		*agbp;
+	struct xfs_perag	*pag;
+	int			error;
+	int			i;
+
+	pag = xfs_perag_get(mp, agno);
+
+	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	if (error || !agbp)
+		goto out_put_perag;
+
+	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
+
+	/*
+	 * Force out the log.  This means any transactions that might have freed
+	 * space before we took the AGF buffer lock are now on disk, and the
+	 * volatile disk cache is flushed.
+	 */
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
+	/*
+	 * Look up the longest btree in the AGF and start with it.
+	 */
+	error = xfs_alloc_lookup_le(cur, 0,
+				    XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
+	if (error)
+		goto out_del_cursor;
+
+	/*
+	 * Loop until we are done with all extents that are large
+	 * enough to be worth discarding.
+	 */
+	while (i) {
+		xfs_agblock_t fbno;
+		xfs_extlen_t flen;
+
+		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
+		if (error)
+			goto out_del_cursor;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
+		ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
+
+		/*
+		 * Too small?  Give up.
+		 */
+		if (flen < minlen) {
+			trace_xfs_discard_toosmall(mp, agno, fbno, flen);
+			goto out_del_cursor;
+		}
+
+		/*
+		 * If the extent is entirely outside of the range we are
+		 * supposed to discard, skip it.  Do not bother to trim
+		 * down partially overlapping ranges for now.
+		 */
+		if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start ||
+		    XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) {
+			trace_xfs_discard_exclude(mp, agno, fbno, flen);
+			goto next_extent;
+		}
+
+		/*
+		 * If any blocks in the range are still busy, skip the
+		 * discard and try again the next time.
+		 */
+		if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
+			trace_xfs_discard_busy(mp, agno, fbno, flen);
+			goto next_extent;
+		}
+
+		trace_xfs_discard_extent(mp, agno, fbno, flen);
+		error = -blkdev_issue_discard(bdev,
+				XFS_AGB_TO_DADDR(mp, agno, fbno),
+				XFS_FSB_TO_BB(mp, flen),
+				GFP_NOFS, 0);
+		if (error)
+			goto out_del_cursor;
+		*blocks_trimmed += flen;
+
+next_extent:
+		error = xfs_btree_decrement(cur, 0, &i);
+		if (error)
+			goto out_del_cursor;
+	}
+
+out_del_cursor:
+	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	xfs_buf_relse(agbp);
+out_put_perag:
+	xfs_perag_put(pag);
+	return error;
+}
+
+int
+xfs_ioc_trim(
+	struct xfs_mount		*mp,
+	struct fstrim_range __user	*urange)
+{
+	struct request_queue	*q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
+	unsigned int		granularity = q->limits.discard_granularity;
+	struct fstrim_range	range;
+	xfs_fsblock_t		start, len, minlen;
+	xfs_agnumber_t		start_agno, end_agno, agno;
+	__uint64_t		blocks_trimmed = 0;
+	int			error, last_error = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -XFS_ERROR(EPERM);
+	if (copy_from_user(&range, urange, sizeof(range)))
+		return -XFS_ERROR(EFAULT);
+
+	/*
+	 * Truncating down the len isn't actually quite correct, but using
+	 * XFS_B_TO_FSB would mean we trivially get overflows for values
+	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
+	 * used by the fstrim application.  In the end it really doesn't
+	 * matter as trimming blocks is an advisory interface.
+	 */
+	start = XFS_B_TO_FSBT(mp, range.start);
+	len = XFS_B_TO_FSBT(mp, range.len);
+	minlen = XFS_B_TO_FSB(mp, max_t(u64, granularity, range.minlen));
+
+	start_agno = XFS_FSB_TO_AGNO(mp, start);
+	if (start_agno >= mp->m_sb.sb_agcount)
+		return -XFS_ERROR(EINVAL);
+
+	end_agno = XFS_FSB_TO_AGNO(mp, start + len);
+	if (end_agno >= mp->m_sb.sb_agcount)
+		end_agno = mp->m_sb.sb_agcount - 1;
+
+	for (agno = start_agno; agno <= end_agno; agno++) {
+		error = -xfs_trim_extents(mp, agno, start, len, minlen,
+					  &blocks_trimmed);
+		if (error)
+			last_error = error;
+	}
+
+	if (last_error)
+		return last_error;
+
+	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
+	if (copy_to_user(urange, &range, sizeof(range)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
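Note: xfs_ioc_trim() services the generic FITRIM ioctl, so the same call
works from userspace against any filesystem that wires it up. A minimal
sketch, assuming an XFS mount point at /mnt/xfs (illustrative):

	#include <fcntl.h>
	#include <limits.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

	int main(void)
	{
		struct fstrim_range range;
		int fd = open("/mnt/xfs", O_RDONLY);

		if (fd < 0)
			return 1;

		memset(&range, 0, sizeof(range));
		range.len = ULLONG_MAX;	/* trim the whole filesystem */

		if (ioctl(fd, FITRIM, &range) < 0)
			perror("FITRIM");
		else
			printf("trimmed %llu bytes\n",
			       (unsigned long long)range.len);

		close(fd);
		return 0;
	}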
diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h
new file mode 100644
index 0000000..e82b6dd
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_discard.h
@@ -0,0 +1,8 @@
+#ifndef XFS_DISCARD_H
+#define XFS_DISCARD_H 1
+
+struct fstrim_range;
+
+extern int	xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *);
+
+#endif /* XFS_DISCARD_H */
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index ba8ad42..a55c1b4 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -37,10 +37,45 @@
 #include "xfs_trace.h"
 
 #include <linux/dcache.h>
+#include <linux/falloc.h>
 
 static const struct vm_operations_struct xfs_file_vm_ops;
 
 /*
+ * Locking primitives for read and write IO paths to ensure we consistently use
+ * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
+ */
+static inline void
+xfs_rw_ilock(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_lock(&VFS_I(ip)->i_mutex);
+	xfs_ilock(ip, type);
+}
+
+static inline void
+xfs_rw_iunlock(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	xfs_iunlock(ip, type);
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_unlock(&VFS_I(ip)->i_mutex);
+}
+
+static inline void
+xfs_rw_ilock_demote(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	xfs_ilock_demote(ip, type);
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_unlock(&VFS_I(ip)->i_mutex);
+}
+
+/*
  *	xfs_iozero
  *
  *	xfs_iozero clears the specified range of buffer supplied,
@@ -262,22 +297,21 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if (unlikely(ioflags & IO_ISDIRECT))
-		mutex_lock(&inode->i_mutex);
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
 	if (unlikely(ioflags & IO_ISDIRECT)) {
+		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+
 		if (inode->i_mapping->nrpages) {
 			ret = -xfs_flushinval_pages(ip,
 					(iocb->ki_pos & PAGE_CACHE_MASK),
 					-1, FI_REMAPF_LOCKED);
+			if (ret) {
+				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
+				return ret;
+			}
 		}
-		mutex_unlock(&inode->i_mutex);
-		if (ret) {
-			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-			return ret;
-		}
-	}
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+	} else
+		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 
 	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
 
@@ -285,7 +319,7 @@
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
@@ -309,7 +343,7 @@
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 
 	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
 
@@ -317,10 +351,61 @@
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
+STATIC void
+xfs_aio_write_isize_update(
+	struct inode	*inode,
+	loff_t		*ppos,
+	ssize_t		bytes_written)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	xfs_fsize_t		isize = i_size_read(inode);
+
+	if (bytes_written > 0)
+		XFS_STATS_ADD(xs_write_bytes, bytes_written);
+
+	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
+					*ppos > isize))
+		*ppos = isize;
+
+	if (*ppos > ip->i_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		if (*ppos > ip->i_size)
+			ip->i_size = *ppos;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
+ * part of the I/O may have been written to disk before the error occurred.  In
+ * this case the on-disk file size may have been adjusted beyond the in-memory
+ * file size and now needs to be truncated back.
+ */
+STATIC void
+xfs_aio_write_newsize_update(
+	struct xfs_inode	*ip)
+{
+	if (ip->i_new_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		ip->i_new_size = 0;
+		if (ip->i_d.di_size > ip->i_size)
+			ip->i_d.di_size = ip->i_size;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * xfs_file_splice_write() does not use xfs_rw_ilock() because
+ * generic_file_splice_write() takes the i_mutex itself. This, in theory,
+ * could cause lock inversions between the aio_write path and the splice path
+ * if someone is doing concurrent splice(2) based writes and write(2) based
+ * writes to the same inode. The only real way to fix this is to re-implement
+ * the generic code here with correct locking orders.
+ */
 STATIC ssize_t
 xfs_file_splice_write(
 	struct pipe_inode_info	*pipe,
@@ -331,7 +416,7 @@
 {
 	struct inode		*inode = outfilp->f_mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	xfs_fsize_t		isize, new_size;
+	xfs_fsize_t		new_size;
 	int			ioflags = 0;
 	ssize_t			ret;
 
@@ -355,27 +440,9 @@
 	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
 
 	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
-	if (ret > 0)
-		XFS_STATS_ADD(xs_write_bytes, ret);
 
-	isize = i_size_read(inode);
-	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
-		*ppos = isize;
-
-	if (*ppos > ip->i_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		if (*ppos > ip->i_size)
-			ip->i_size = *ppos;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-
-	if (ip->i_new_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
-		if (ip->i_d.di_size > ip->i_size)
-			ip->i_d.di_size = ip->i_size;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
+	xfs_aio_write_isize_update(inode, ppos, ret);
+	xfs_aio_write_newsize_update(ip);
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 	return ret;
 }
@@ -562,6 +629,194 @@
 	return error;
 }
 
+/*
+ * Common pre-write limit and setup checks.
+ *
+ * Returns with iolock held according to @iolock.
+ */
+STATIC ssize_t
+xfs_file_aio_write_checks(
+	struct file		*file,
+	loff_t			*pos,
+	size_t			*count,
+	int			*iolock)
+{
+	struct inode		*inode = file->f_mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	xfs_fsize_t		new_size;
+	int			error = 0;
+
+	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
+	if (error) {
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
+		*iolock = 0;
+		return error;
+	}
+
+	new_size = *pos + *count;
+	if (new_size > ip->i_size)
+		ip->i_new_size = new_size;
+
+	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
+		file_update_time(file);
+
+	/*
+	 * If the offset is beyond the size of the file, we need to zero any
+	 * blocks that fall between the existing EOF and the start of this
+	 * write.
+	 */
+	if (*pos > ip->i_size)
+		error = -xfs_zero_eof(ip, *pos, ip->i_size);
+
+	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	if (error)
+		return error;
+
+	/*
+	 * If we're writing the file then make sure to clear the setuid and
+	 * setgid bits if the process is not being run by root.  This keeps
+	 * people from modifying setuid and setgid binaries.
+	 */
+	return file_remove_suid(file);
+
+}
+
+/*
+ * xfs_file_dio_aio_write - handle direct IO writes
+ *
+ * Lock the inode appropriately to prepare for and issue a direct IO write.
+ * By separating it from the buffered write path we remove all of the
+ * tricky-to-follow locking changes and looping.
+ *
+ * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
+ * until we're sure the bytes at the new EOF have been zeroed and/or the cached
+ * pages are flushed out.
+ *
+ * In most cases the direct IO writes will be done holding IOLOCK_SHARED
+ * allowing them to be done in parallel with reads and other direct IO writes.
+ * However, if the IO is not aligned to filesystem blocks, the direct IO layer
+ * needs to do sub-block zeroing and that requires serialisation against other
+ * direct IOs to the same block. In this case we need to serialise the
+ * submission of the unaligned IOs so that we don't get racing block zeroing in
+ * the dio layer.  To avoid the problem with aio, we also need to wait for
+ * outstanding IOs to complete so that unwritten extent conversion is completed
+ * before we try to map the overlapping block. This is currently implemented by
+ * hitting it with a big hammer (i.e. xfs_ioend_wait()).
+ *
+ * Returns with locks held indicated by @iolock and errors indicated by
+ * negative return values.
+ */
+STATIC ssize_t
+xfs_file_dio_aio_write(
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned long		nr_segs,
+	loff_t			pos,
+	size_t			ocount,
+	int			*iolock)
+{
+	struct file		*file = iocb->ki_filp;
+	struct address_space	*mapping = file->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			ret = 0;
+	size_t			count = ocount;
+	int			unaligned_io = 0;
+	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
+					mp->m_rtdev_targp : mp->m_ddev_targp;
+
+	*iolock = 0;
+	if ((pos & target->bt_smask) || (count & target->bt_smask))
+		return -XFS_ERROR(EINVAL);
+
+	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
+		unaligned_io = 1;
+
+	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
+		*iolock = XFS_IOLOCK_EXCL;
+	else
+		*iolock = XFS_IOLOCK_SHARED;
+	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+
+	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	if (ret)
+		return ret;
+
+	if (mapping->nrpages) {
+		WARN_ON(*iolock != XFS_IOLOCK_EXCL);
+		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
+							FI_REMAPF_LOCKED);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * If we are doing unaligned IO, wait for all other IO to drain;
+	 * otherwise, demote the lock if we had to flush cached pages.
+	 */
+	if (unaligned_io)
+		xfs_ioend_wait(ip);
+	else if (*iolock == XFS_IOLOCK_EXCL) {
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		*iolock = XFS_IOLOCK_SHARED;
+	}
+
+	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
+	ret = generic_file_direct_write(iocb, iovp,
+			&nr_segs, pos, &iocb->ki_pos, count, ocount);
+
+	/* No fallback to buffered IO on errors for XFS. */
+	ASSERT(ret < 0 || ret == count);
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_buffered_aio_write(
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned long		nr_segs,
+	loff_t			pos,
+	size_t			ocount,
+	int			*iolock)
+{
+	struct file		*file = iocb->ki_filp;
+	struct address_space	*mapping = file->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	ssize_t			ret;
+	int			enospc = 0;
+	size_t			count = ocount;
+
+	*iolock = XFS_IOLOCK_EXCL;
+	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+
+	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	if (ret)
+		return ret;
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+
+write_retry:
+	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
+	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
+			pos, &iocb->ki_pos, count, ret);
+	/*
+	 * If we just got an ENOSPC, flush the inode now that we aren't
+	 * holding any page locks and retry *once*.
+	 */
+	if (ret == -ENOSPC && !enospc) {
+		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
+		if (ret)
+			return ret;
+		enospc = 1;
+		goto write_retry;
+	}
+	current->backing_dev_info = NULL;
+	return ret;
+}
+
 STATIC ssize_t
 xfs_file_aio_write(
 	struct kiocb		*iocb,
@@ -573,236 +828,115 @@
 	struct address_space	*mapping = file->f_mapping;
 	struct inode		*inode = mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			ret = 0, error = 0;
-	int			ioflags = 0;
-	xfs_fsize_t		isize, new_size;
+	ssize_t			ret;
 	int			iolock;
-	size_t			ocount = 0, count;
-	int			need_i_mutex;
+	size_t			ocount = 0;
 
 	XFS_STATS_INC(xs_write_calls);
 
 	BUG_ON(iocb->ki_pos != pos);
 
-	if (unlikely(file->f_flags & O_DIRECT))
-		ioflags |= IO_ISDIRECT;
-	if (file->f_mode & FMODE_NOCMTIME)
-		ioflags |= IO_INVIS;
+	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
+	if (ret)
+		return ret;
 
-	error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-	if (error)
-		return error;
-
-	count = ocount;
-	if (count == 0)
+	if (ocount == 0)
 		return 0;
 
-	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);
+	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);
 
-	if (XFS_FORCED_SHUTDOWN(mp))
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-relock:
-	if (ioflags & IO_ISDIRECT) {
-		iolock = XFS_IOLOCK_SHARED;
-		need_i_mutex = 0;
-	} else {
-		iolock = XFS_IOLOCK_EXCL;
-		need_i_mutex = 1;
-		mutex_lock(&inode->i_mutex);
-	}
+	if (unlikely(file->f_flags & O_DIRECT))
+		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
+						ocount, &iolock);
+	else
+		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
+						ocount, &iolock);
 
-	xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
+	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);
 
-start:
-	error = -generic_write_checks(file, &pos, &count,
-					S_ISBLK(inode->i_mode));
-	if (error) {
-		xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-		goto out_unlock_mutex;
-	}
-
-	if (ioflags & IO_ISDIRECT) {
-		xfs_buftarg_t	*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-				mp->m_rtdev_targp : mp->m_ddev_targp;
-
-		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-			return XFS_ERROR(-EINVAL);
-		}
-
-		if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-			iolock = XFS_IOLOCK_EXCL;
-			need_i_mutex = 1;
-			mutex_lock(&inode->i_mutex);
-			xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
-			goto start;
-		}
-	}
-
-	new_size = pos + count;
-	if (new_size > ip->i_size)
-		ip->i_new_size = new_size;
-
-	if (likely(!(ioflags & IO_INVIS)))
-		file_update_time(file);
-
-	/*
-	 * If the offset is beyond the size of the file, we have a couple
-	 * of things to do. First, if there is already space allocated
-	 * we need to either create holes or zero the disk or ...
-	 *
-	 * If there is a page where the previous size lands, we need
-	 * to zero it out up to the new size.
-	 */
-
-	if (pos > ip->i_size) {
-		error = xfs_zero_eof(ip, pos, ip->i_size);
-		if (error) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL);
-			goto out_unlock_internal;
-		}
-	}
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-	/*
-	 * If we're writing the file then make sure to clear the
-	 * setuid and setgid bits if the process is not being run
-	 * by root.  This keeps people from modifying setuid and
-	 * setgid binaries.
-	 */
-	error = -file_remove_suid(file);
-	if (unlikely(error))
-		goto out_unlock_internal;
-
-	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
-
-	if ((ioflags & IO_ISDIRECT)) {
-		if (mapping->nrpages) {
-			WARN_ON(need_i_mutex == 0);
-			error = xfs_flushinval_pages(ip,
-					(pos & PAGE_CACHE_MASK),
-					-1, FI_REMAPF_LOCKED);
-			if (error)
-				goto out_unlock_internal;
-		}
-
-		if (need_i_mutex) {
-			/* demote the lock now the cached pages are gone */
-			xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
-			mutex_unlock(&inode->i_mutex);
-
-			iolock = XFS_IOLOCK_SHARED;
-			need_i_mutex = 0;
-		}
-
-		trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags);
-		ret = generic_file_direct_write(iocb, iovp,
-				&nr_segs, pos, &iocb->ki_pos, count, ocount);
-
-		/*
-		 * direct-io write to a hole: fall through to buffered I/O
-		 * for completing the rest of the request.
-		 */
-		if (ret >= 0 && ret != count) {
-			XFS_STATS_ADD(xs_write_bytes, ret);
-
-			pos += ret;
-			count -= ret;
-
-			ioflags &= ~IO_ISDIRECT;
-			xfs_iunlock(ip, iolock);
-			goto relock;
-		}
-	} else {
-		int enospc = 0;
-		ssize_t ret2 = 0;
-
-write_retry:
-		trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags);
-		ret2 = generic_file_buffered_write(iocb, iovp, nr_segs,
-				pos, &iocb->ki_pos, count, ret);
-		/*
-		 * if we just got an ENOSPC, flush the inode now we
-		 * aren't holding any page locks and retry *once*
-		 */
-		if (ret2 == -ENOSPC && !enospc) {
-			error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
-			if (error)
-				goto out_unlock_internal;
-			enospc = 1;
-			goto write_retry;
-		}
-		ret = ret2;
-	}
-
-	current->backing_dev_info = NULL;
-
-	isize = i_size_read(inode);
-	if (unlikely(ret < 0 && ret != -EFAULT && iocb->ki_pos > isize))
-		iocb->ki_pos = isize;
-
-	if (iocb->ki_pos > ip->i_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		if (iocb->ki_pos > ip->i_size)
-			ip->i_size = iocb->ki_pos;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-
-	error = -ret;
 	if (ret <= 0)
-		goto out_unlock_internal;
-
-	XFS_STATS_ADD(xs_write_bytes, ret);
+		goto out_unlock;
 
 	/* Handle various SYNC-type writes */
 	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
 		loff_t end = pos + ret - 1;
-		int error2;
+		int error, error2;
 
-		xfs_iunlock(ip, iolock);
-		if (need_i_mutex)
-			mutex_unlock(&inode->i_mutex);
-
-		error2 = filemap_write_and_wait_range(mapping, pos, end);
-		if (!error)
-			error = error2;
-		if (need_i_mutex)
-			mutex_lock(&inode->i_mutex);
-		xfs_ilock(ip, iolock);
+		xfs_rw_iunlock(ip, iolock);
+		error = filemap_write_and_wait_range(mapping, pos, end);
+		xfs_rw_ilock(ip, iolock);
 
 		error2 = -xfs_file_fsync(file,
 					 (file->f_flags & __O_SYNC) ? 0 : 1);
-		if (!error)
-			error = error2;
+		if (error)
+			ret = error;
+		else if (error2)
+			ret = error2;
 	}
 
- out_unlock_internal:
-	if (ip->i_new_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
-		/*
-		 * If this was a direct or synchronous I/O that failed (such
-		 * as ENOSPC) then part of the I/O may have been written to
-		 * disk before the error occured.  In this case the on-disk
-		 * file size may have been adjusted beyond the in-memory file
-		 * size and now needs to be truncated back.
-		 */
-		if (ip->i_d.di_size > ip->i_size)
-			ip->i_d.di_size = ip->i_size;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-	xfs_iunlock(ip, iolock);
- out_unlock_mutex:
-	if (need_i_mutex)
-		mutex_unlock(&inode->i_mutex);
-	return -error;
+out_unlock:
+	xfs_aio_write_newsize_update(ip);
+	xfs_rw_iunlock(ip, iolock);
+	return ret;
 }
 
+STATIC long
+xfs_file_fallocate(
+	struct file	*file,
+	int		mode,
+	loff_t		offset,
+	loff_t		len)
+{
+	struct inode	*inode = file->f_path.dentry->d_inode;
+	long		error;
+	loff_t		new_size = 0;
+	xfs_flock64_t	bf;
+	xfs_inode_t	*ip = XFS_I(inode);
+	int		cmd = XFS_IOC_RESVSP;
+
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
+
+	bf.l_whence = 0;
+	bf.l_start = offset;
+	bf.l_len = len;
+
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = XFS_IOC_UNRESVSP;
+
+	/* check the new inode size is valid before allocating */
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	    offset + len > i_size_read(inode)) {
+		new_size = offset + len;
+		error = inode_newsize_ok(inode, new_size);
+		if (error)
+			goto out_unlock;
+	}
+
+	error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
+	if (error)
+		goto out_unlock;
+
+	/* Change file size if needed */
+	if (new_size) {
+		struct iattr iattr;
+
+		iattr.ia_valid = ATTR_SIZE;
+		iattr.ia_size = new_size;
+		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+	}
+
+out_unlock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+}
+
+
 STATIC int
 xfs_file_open(
 	struct inode	*inode,
@@ -921,6 +1055,7 @@
 	.open		= xfs_file_open,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
+	.fallocate	= xfs_file_fallocate,
 };
 
 const struct file_operations xfs_dir_file_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index ad442d9..b06ede1 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -39,6 +39,7 @@
 #include "xfs_dfrag.h"
 #include "xfs_fsops.h"
 #include "xfs_vnodeops.h"
+#include "xfs_discard.h"
 #include "xfs_quota.h"
 #include "xfs_inode_item.h"
 #include "xfs_export.h"
@@ -1294,6 +1295,8 @@
 	trace_xfs_file_ioctl(ip);
 
 	switch (cmd) {
+	case FITRIM:
+		return xfs_ioc_trim(mp, arg);
 	case XFS_IOC_ALLOCSP:
 	case XFS_IOC_FREESP:
 	case XFS_IOC_RESVSP:
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index da54403..bd57278 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -46,7 +46,6 @@
 #include <linux/namei.h>
 #include <linux/posix_acl.h>
 #include <linux/security.h>
-#include <linux/falloc.h>
 #include <linux/fiemap.h>
 #include <linux/slab.h>
 
@@ -505,61 +504,6 @@
 	return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
 }
 
-STATIC long
-xfs_vn_fallocate(
-	struct inode	*inode,
-	int		mode,
-	loff_t		offset,
-	loff_t		len)
-{
-	long		error;
-	loff_t		new_size = 0;
-	xfs_flock64_t	bf;
-	xfs_inode_t	*ip = XFS_I(inode);
-	int		cmd = XFS_IOC_RESVSP;
-
-	/* preallocation on directories not yet supported */
-	error = -ENODEV;
-	if (S_ISDIR(inode->i_mode))
-		goto out_error;
-
-	bf.l_whence = 0;
-	bf.l_start = offset;
-	bf.l_len = len;
-
-	xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
-	if (mode & FALLOC_FL_PUNCH_HOLE)
-		cmd = XFS_IOC_UNRESVSP;
-
-	/* check the new inode size is valid before allocating */
-	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	    offset + len > i_size_read(inode)) {
-		new_size = offset + len;
-		error = inode_newsize_ok(inode, new_size);
-		if (error)
-			goto out_unlock;
-	}
-
-	error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
-	if (error)
-		goto out_unlock;
-
-	/* Change file size if needed */
-	if (new_size) {
-		struct iattr iattr;
-
-		iattr.ia_valid = ATTR_SIZE;
-		iattr.ia_size = new_size;
-		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
-	}
-
-out_unlock:
-	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-out_error:
-	return error;
-}
-
 #define XFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
 
 /*
@@ -653,7 +597,6 @@
 	.getxattr		= generic_getxattr,
 	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
-	.fallocate		= xfs_vn_fallocate,
 	.fiemap			= xfs_vn_fiemap,
 };
 
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index bd07f73..9731898 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1414,7 +1414,7 @@
 
 	xfs_save_resvblks(mp);
 	xfs_quiesce_attr(mp);
-	return -xfs_fs_log_dummy(mp, SYNC_WAIT);
+	return -xfs_fs_log_dummy(mp);
 }
 
 STATIC int
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index a02480d..e22f005 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -362,7 +362,7 @@
 
 	/* mark the log as covered if needed */
 	if (xfs_log_need_covered(mp))
-		error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);
+		error2 = xfs_fs_log_dummy(mp);
 
 	/* flush data-only devices */
 	if (mp->m_rtdev_targp)
@@ -503,13 +503,14 @@
 	int		error;
 
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-		xfs_log_force(mp, 0);
-		xfs_reclaim_inodes(mp, 0);
 		/* dgc: errors ignored here */
-		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 		if (mp->m_super->s_frozen == SB_UNFROZEN &&
 		    xfs_log_need_covered(mp))
-			error = xfs_fs_log_dummy(mp, 0);
+			error = xfs_fs_log_dummy(mp);
+		else
+			xfs_log_force(mp, 0);
+		xfs_reclaim_inodes(mp, 0);
+		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 	}
 	mp->m_sync_seq++;
 	wake_up(&mp->m_wait_single_sync_task);
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index 7bb5092..ee3cee0 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -18,6 +18,7 @@
 #include "xfs.h"
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
+#include "xfs_error.h"
 
 static struct ctl_table_header *xfs_table_header;
 
@@ -51,6 +52,26 @@
 
 	return ret;
 }
+
+STATIC int
+xfs_panic_mask_proc_handler(
+	ctl_table	*ctl,
+	int		write,
+	void		__user *buffer,
+	size_t		*lenp,
+	loff_t		*ppos)
+{
+	int		ret, *valp = ctl->data;
+
+	ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
+	if (!ret && write) {
+		xfs_panic_mask = *valp;
+#ifdef DEBUG
+		xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
+#endif
+	}
+	return ret;
+}
 #endif /* CONFIG_PROC_FS */
 
 static ctl_table xfs_table[] = {
@@ -77,7 +98,7 @@
 		.data		= &xfs_params.panic_mask.val,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= xfs_panic_mask_proc_handler,
 		.extra1		= &xfs_params.panic_mask.min,
 		.extra2		= &xfs_params.panic_mask.max
 	},
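Note: the handler above fires when the tunable is written through procfs or
sysctl(8), i.e. fs.xfs.panic_mask. A minimal sketch that sets it from C,
assuming the usual /proc/sys mount and an illustrative mask value:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/fs/xfs/panic_mask", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* 8 is an illustrative XFS_PTAG_* bit, not a recommendation. */
		fprintf(f, "%d\n", 8);
		fclose(f);
		return 0;
	}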
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index 647af2a..2d0bcb4 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -1759,6 +1759,39 @@
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
 
+DECLARE_EVENT_CLASS(xfs_discard_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agblock_t agbno, xfs_extlen_t len),
+	TP_ARGS(mp, agno, agbno, len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(xfs_extlen_t, len)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->len = len;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->len)
+)
+
+#define DEFINE_DISCARD_EVENT(name) \
+DEFINE_EVENT(xfs_discard_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 xfs_agblock_t agbno, xfs_extlen_t len), \
+	TP_ARGS(mp, agno, agbno, len))
+DEFINE_DISCARD_EVENT(xfs_discard_extent);
+DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
+DEFINE_DISCARD_EVENT(xfs_discard_exclude);
+DEFINE_DISCARD_EVENT(xfs_discard_busy);
+
 #endif /* _TRACE_XFS_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 975aa10..e6cf955 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -25,86 +25,78 @@
 #include "xfs_mount.h"
 #include "xfs_error.h"
 
-static char		message[1024];	/* keep it off the stack */
-static DEFINE_SPINLOCK(xfs_err_lock);
-
-/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
-#define XFS_MAX_ERR_LEVEL	7
-#define XFS_ERR_MASK		((1 << 3) - 1)
-static const char * const	err_level[XFS_MAX_ERR_LEVEL+1] =
-					{KERN_EMERG, KERN_ALERT, KERN_CRIT,
-					 KERN_ERR, KERN_WARNING, KERN_NOTICE,
-					 KERN_INFO, KERN_DEBUG};
-
 void
-cmn_err(register int level, char *fmt, ...)
+cmn_err(
+	const char	*lvl,
+	const char	*fmt,
+	...)
 {
-	char	*fp = fmt;
-	int	len;
-	ulong	flags;
-	va_list	ap;
+	struct va_format vaf;
+	va_list		args;
 
-	level &= XFS_ERR_MASK;
-	if (level > XFS_MAX_ERR_LEVEL)
-		level = XFS_MAX_ERR_LEVEL;
-	spin_lock_irqsave(&xfs_err_lock,flags);
-	va_start(ap, fmt);
-	if (*fmt == '!') fp++;
-	len = vsnprintf(message, sizeof(message), fp, ap);
-	if (len >= sizeof(message))
-		len = sizeof(message) - 1;
-	if (message[len-1] == '\n')
-		message[len-1] = 0;
-	printk("%s%s\n", err_level[level], message);
-	va_end(ap);
-	spin_unlock_irqrestore(&xfs_err_lock,flags);
-	BUG_ON(level == CE_PANIC);
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%s%pV", lvl, &vaf);
+	va_end(args);
+
+	BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
 }
 
 void
-xfs_fs_vcmn_err(
-	int			level,
+xfs_fs_cmn_err(
+	const char		*lvl,
 	struct xfs_mount	*mp,
-	char			*fmt,
-	va_list			ap)
+	const char		*fmt,
+	...)
 {
-	unsigned long		flags;
-	int			len = 0;
+	struct va_format	vaf;
+	va_list			args;
 
-	level &= XFS_ERR_MASK;
-	if (level > XFS_MAX_ERR_LEVEL)
-		level = XFS_MAX_ERR_LEVEL;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
-	spin_lock_irqsave(&xfs_err_lock,flags);
+	printk("%sFilesystem %s: %pV", lvl, mp->m_fsname, &vaf);
+	va_end(args);
 
-	if (mp) {
-		len = sprintf(message, "Filesystem \"%s\": ", mp->m_fsname);
+	BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
+}
 
-		/*
-		 * Skip the printk if we can't print anything useful
-		 * due to an over-long device name.
-		 */
-		if (len >= sizeof(message))
-			goto out;
+/* All callers to xfs_cmn_err use CE_ALERT, so don't bother testing lvl */
+void
+xfs_cmn_err(
+	int			panic_tag,
+	const char		*lvl,
+	struct xfs_mount	*mp,
+	const char		*fmt,
+	...)
+{
+	struct va_format	vaf;
+	va_list			args;
+	int			panic = 0;
+
+	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
+		printk(KERN_ALERT "XFS: Transforming an alert into a BUG.");
+		panic = 1;
 	}
 
-	len = vsnprintf(message + len, sizeof(message) - len, fmt, ap);
-	if (len >= sizeof(message))
-		len = sizeof(message) - 1;
-	if (message[len-1] == '\n')
-		message[len-1] = 0;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
-	printk("%s%s\n", err_level[level], message);
- out:
-	spin_unlock_irqrestore(&xfs_err_lock,flags);
+	printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf);
+	va_end(args);
 
-	BUG_ON(level == CE_PANIC);
+	BUG_ON(panic);
 }
 
 void
 assfail(char *expr, char *file, int line)
 {
-	printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
+	printk(KERN_CRIT "Assertion failed: %s, file: %s, line: %d\n", expr,
+	       file, line);
 	BUG();
 }
 
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index d2d2046..05699f6 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -20,15 +20,22 @@
 
 #include <stdarg.h>
 
-#define CE_DEBUG        7               /* debug        */
-#define CE_CONT         6               /* continuation */
-#define CE_NOTE         5               /* notice       */
-#define CE_WARN         4               /* warning      */
-#define CE_ALERT        1               /* alert        */
-#define CE_PANIC        0               /* panic        */
+struct xfs_mount;
 
-extern void cmn_err(int, char *, ...)
-	__attribute__ ((format (printf, 2, 3)));
+#define CE_DEBUG        KERN_DEBUG
+#define CE_CONT         KERN_INFO
+#define CE_NOTE         KERN_NOTICE
+#define CE_WARN         KERN_WARNING
+#define CE_ALERT        KERN_ALERT
+#define CE_PANIC        KERN_EMERG
+
+void cmn_err(const char *lvl, const char *fmt, ...)
+		__attribute__ ((format (printf, 2, 3)));
+void xfs_fs_cmn_err( const char *lvl, struct xfs_mount *mp,
+		const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
+void xfs_cmn_err( int panic_tag, const char *lvl, struct xfs_mount *mp,
+		const char *fmt, ...) __attribute__ ((format (printf, 4, 5)));
+
 extern void assfail(char *expr, char *f, int l);
 
 #define ASSERT_ALWAYS(expr)	\
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index fa8723f..f322798 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -41,10 +41,6 @@
 #define	XFSA_FIXUP_BNO_OK	1
 #define	XFSA_FIXUP_CNT_OK	2
 
-static int
-xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
-		    xfs_agblock_t bno, xfs_extlen_t len);
-
 /*
  * Prototypes for per-ag allocation routines
  */
@@ -94,7 +90,7 @@
  * Lookup the first record less than or equal to [bno, len]
  * in the btree given by cur.
  */
-STATIC int				/* error */
+int					/* error */
 xfs_alloc_lookup_le(
 	struct xfs_btree_cur	*cur,	/* btree cursor */
 	xfs_agblock_t		bno,	/* starting block of extent */
@@ -127,7 +123,7 @@
 /*
  * Get the data from the pointed-to record.
  */
-STATIC int				/* error */
+int					/* error */
 xfs_alloc_get_rec(
 	struct xfs_btree_cur	*cur,	/* btree cursor */
 	xfs_agblock_t		*bno,	/* output: starting block of extent */
@@ -2615,7 +2611,7 @@
  * will require a synchronous transaction, but it can still be
  * used to distinguish between a partial or exact match.
  */
-static int
+int
 xfs_alloc_busy_search(
 	struct xfs_mount	*mp,
 	xfs_agnumber_t		agno,
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 895009a..0ab56b3 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -19,6 +19,7 @@
 #define	__XFS_ALLOC_H__
 
 struct xfs_buf;
+struct xfs_btree_cur;
 struct xfs_mount;
 struct xfs_perag;
 struct xfs_trans;
@@ -118,16 +119,16 @@
 		struct xfs_perag *pag);
 
 #ifdef __KERNEL__
-
 void
-xfs_alloc_busy_insert(xfs_trans_t *tp,
-		xfs_agnumber_t agno,
-		xfs_agblock_t bno,
-		xfs_extlen_t len);
+xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
+	xfs_agblock_t bno, xfs_extlen_t len);
 
 void
 xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp);
 
+int
+xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+	xfs_agblock_t bno, xfs_extlen_t len);
 #endif	/* __KERNEL__ */
 
 /*
@@ -205,4 +206,18 @@
 	xfs_fsblock_t	bno,	/* starting block number of extent */
 	xfs_extlen_t	len);	/* length of extent */
 
+int					/* error */
+xfs_alloc_lookup_le(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len,	/* length of extent */
+	int			*stat);	/* success/failure */
+
+int					/* error */
+xfs_alloc_get_rec(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		*bno,	/* output: starting block of extent */
+	xfs_extlen_t		*len,	/* output: length of extent */
+	int			*stat);	/* output: success/failure */
+
 #endif	/* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index ed2b65f..98c6f73 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -141,7 +141,6 @@
 #define		xfs_buf_item_log_check(x)
 #endif
 
-STATIC void	xfs_buf_error_relse(xfs_buf_t *bp);
 STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
 
 /*
@@ -959,86 +958,70 @@
  */
 void
 xfs_buf_iodone_callbacks(
-	xfs_buf_t	*bp)
+	struct xfs_buf		*bp)
 {
-	xfs_log_item_t	*lip;
-	static ulong	lasttime;
-	static xfs_buftarg_t *lasttarg;
-	xfs_mount_t	*mp;
+	struct xfs_log_item	*lip = bp->b_fspriv;
+	struct xfs_mount	*mp = lip->li_mountp;
+	static ulong		lasttime;
+	static xfs_buftarg_t	*lasttarg;
 
-	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	if (likely(!XFS_BUF_GETERROR(bp)))
+		goto do_callbacks;
 
-	if (XFS_BUF_GETERROR(bp) != 0) {
-		/*
-		 * If we've already decided to shutdown the filesystem
-		 * because of IO errors, there's no point in giving this
-		 * a retry.
-		 */
-		mp = lip->li_mountp;
-		if (XFS_FORCED_SHUTDOWN(mp)) {
-			ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
-			XFS_BUF_SUPER_STALE(bp);
-			trace_xfs_buf_item_iodone(bp, _RET_IP_);
-			xfs_buf_do_callbacks(bp);
-			XFS_BUF_SET_FSPRIVATE(bp, NULL);
-			XFS_BUF_CLR_IODONE_FUNC(bp);
-			xfs_buf_ioend(bp, 0);
-			return;
-		}
+	/*
+	 * If we've already decided to shut down the filesystem because of
+	 * I/O errors, there's no point in giving this a retry.
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		XFS_BUF_SUPER_STALE(bp);
+		trace_xfs_buf_item_iodone(bp, _RET_IP_);
+		goto do_callbacks;
+	}
 
-		if ((XFS_BUF_TARGET(bp) != lasttarg) ||
-		    (time_after(jiffies, (lasttime + 5*HZ)))) {
-			lasttime = jiffies;
-			cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
-					" block 0x%llx in %s",
-				XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
-			      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
-		}
-		lasttarg = XFS_BUF_TARGET(bp);
+	if (XFS_BUF_TARGET(bp) != lasttarg ||
+	    time_after(jiffies, (lasttime + 5*HZ))) {
+		lasttime = jiffies;
+		cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
+				" block 0x%llx in %s",
+			XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
+		      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
+	}
+	lasttarg = XFS_BUF_TARGET(bp);
 
-		if (XFS_BUF_ISASYNC(bp)) {
-			/*
-			 * If the write was asynchronous then noone will be
-			 * looking for the error.  Clear the error state
-			 * and write the buffer out again delayed write.
-			 *
-			 * XXXsup This is OK, so long as we catch these
-			 * before we start the umount; we don't want these
-			 * DELWRI metadata bufs to be hanging around.
-			 */
-			XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */
+	/*
+	 * If the write was asynchronous then no one will be looking for the
+	 * error.  Clear the error state and write the buffer out again.
+	 *
+	 * During sync or umount we'll write all pending buffers again
+	 * synchronously, which will catch these errors if they keep hanging
+	 * around.
+	 */
+	if (XFS_BUF_ISASYNC(bp)) {
+		XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */
 
-			if (!(XFS_BUF_ISSTALE(bp))) {
-				XFS_BUF_DELAYWRITE(bp);
-				XFS_BUF_DONE(bp);
-				XFS_BUF_SET_START(bp);
-			}
-			ASSERT(XFS_BUF_IODONE_FUNC(bp));
-			trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
-			xfs_buf_relse(bp);
-		} else {
-			/*
-			 * If the write of the buffer was not asynchronous,
-			 * then we want to make sure to return the error
-			 * to the caller of bwrite().  Because of this we
-			 * cannot clear the B_ERROR state at this point.
-			 * Instead we install a callback function that
-			 * will be called when the buffer is released, and
-			 * that routine will clear the error state and
-			 * set the buffer to be written out again after
-			 * some delay.
-			 */
-			/* We actually overwrite the existing b-relse
-			   function at times, but we're gonna be shutting down
-			   anyway. */
-			XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
+		if (!XFS_BUF_ISSTALE(bp)) {
+			XFS_BUF_DELAYWRITE(bp);
 			XFS_BUF_DONE(bp);
-			XFS_BUF_FINISH_IOWAIT(bp);
+			XFS_BUF_SET_START(bp);
 		}
+		ASSERT(XFS_BUF_IODONE_FUNC(bp));
+		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
+		xfs_buf_relse(bp);
 		return;
 	}
 
+	/*
+	 * If the write of the buffer was synchronous, we want to make
+	 * sure to return the error to the caller of xfs_bwrite().
+	 */
+	XFS_BUF_STALE(bp);
+	XFS_BUF_DONE(bp);
+	XFS_BUF_UNDELAYWRITE(bp);
+
+	trace_xfs_buf_error_relse(bp, _RET_IP_);
+	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+
+do_callbacks:
 	xfs_buf_do_callbacks(bp);
 	XFS_BUF_SET_FSPRIVATE(bp, NULL);
 	XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1046,42 +1029,6 @@
 }
 
 /*
- * This is a callback routine attached to a buffer which gets an error
- * when being written out synchronously.
- */
-STATIC void
-xfs_buf_error_relse(
-	xfs_buf_t	*bp)
-{
-	xfs_log_item_t	*lip;
-	xfs_mount_t	*mp;
-
-	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
-	mp = (xfs_mount_t *)lip->li_mountp;
-	ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
-
-	XFS_BUF_STALE(bp);
-	XFS_BUF_DONE(bp);
-	XFS_BUF_UNDELAYWRITE(bp);
-	XFS_BUF_ERROR(bp,0);
-
-	trace_xfs_buf_error_relse(bp, _RET_IP_);
-
-	if (! XFS_FORCED_SHUTDOWN(mp))
-		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-	/*
-	 * We have to unpin the pinned buffers so do the
-	 * callbacks.
-	 */
-	xfs_buf_do_callbacks(bp);
-	XFS_BUF_SET_FSPRIVATE(bp, NULL);
-	XFS_BUF_CLR_IODONE_FUNC(bp);
-	XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
-	xfs_buf_relse(bp);
-}
-
-
-/*
  * This is the iodone() function for buffers which have been
  * logged.  It is called when they are eventually flushed out.
  * It should remove the buf item from the AIL, and free the buf item.
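The hunk above folds the old xfs_buf_error_relse() release callback into
xfs_buf_iodone_callbacks() itself, so every exit path funnels through one
shared label. A simplified, compilable sketch of the resulting control flow;
the types and helpers are invented for illustration, this is not the kernel
code itself:

#include <stdbool.h>
#include <stdio.h>

struct buf { int error; bool async; bool shutdown; };

static void run_callbacks(struct buf *bp)
{
	printf("callbacks for %p\n", (void *)bp);
}

static void iodone_callbacks(struct buf *bp)
{
	if (!bp->error)
		goto do_callbacks;	/* common case: straight to callbacks */

	if (bp->shutdown)
		goto do_callbacks;	/* already shutting down, no retry */

	if (bp->async) {
		bp->error = 0;		/* nobody is waiting; clear and requeue */
		return;			/* buffer released for a delayed rewrite */
	}

	bp->shutdown = true;		/* sync write: report error, force shutdown */

do_callbacks:
	run_callbacks(bp);
}

int main(void)
{
	struct buf b = { .error = -5, .async = false };
	iodone_callbacks(&b);
	return 0;
}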
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index c78cc6a..4c7db74 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -152,37 +152,6 @@
 }
 #endif /* DEBUG */
 
-
-void
-xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	xfs_fs_vcmn_err(level, mp, fmt, ap);
-	va_end(ap);
-}
-
-void
-xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
-{
-	va_list ap;
-
-#ifdef DEBUG
-	xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
-#endif
-
-	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)
-	    && (level & CE_ALERT)) {
-		level &= ~CE_ALERT;
-		level |= CE_PANIC;
-		cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG.");
-	}
-	va_start(ap, fmt);
-	xfs_fs_vcmn_err(level, mp, fmt, ap);
-	va_end(ap);
-}
-
 void
 xfs_error_report(
 	const char		*tag,
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index f338847..10dce54 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -136,8 +136,8 @@
 	 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
 			(rf))))
 
-extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
-extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
+extern int xfs_errortag_add(int error_tag, struct xfs_mount *mp);
+extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud);
 #else
 #define XFS_TEST_ERROR(expr, mp, tag, rf)	(expr)
 #define xfs_errortag_add(tag, mp)		(ENOSYS)
@@ -162,21 +162,15 @@
 
 struct xfs_mount;
 
-extern void xfs_fs_vcmn_err(int level, struct xfs_mount *mp,
-		char *fmt, va_list ap)
-	__attribute__ ((format (printf, 3, 0)));
-extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp,
-			char *fmt, ...)
-	__attribute__ ((format (printf, 4, 5)));
-extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...)
-	__attribute__ ((format (printf, 3, 4)));
-
 extern void xfs_hex_dump(void *p, int length);
 
 #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
 	xfs_fs_cmn_err(level, mp, fmt "  Unmount and run xfs_repair.", ## args)
 
 #define xfs_fs_mount_cmn_err(f, fmt, args...) \
-	((f & XFS_MFSI_QUIET)? (void)0 : cmn_err(CE_WARN, "XFS: " fmt, ## args))
+	do { \
+		if (!(f & XFS_MFSI_QUIET))	\
+			cmn_err(CE_WARN, "XFS: " fmt, ## args); \
+	} while (0)
 
 #endif	/* __XFS_ERROR_H__ */
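The rewritten xfs_fs_mount_cmn_err() wrapper uses the do { ... } while (0)
idiom rather than a ternary expression. The idiom makes a multi-statement
macro parse as exactly one statement, so it stays safe under an un-braced
if/else. A standalone demonstration; the macro below is illustrative, not
the XFS one:

#include <stdio.h>

#define WARN_UNLESS_QUIET(quiet, msg)			\
	do {						\
		if (!(quiet))				\
			printf("XFS: %s\n", msg);	\
	} while (0)

int main(void)
{
	int quiet = 0;

	/* Expands to a single statement, so the else binds correctly. */
	if (quiet)
		puts("silent mount");
	else
		WARN_UNLESS_QUIET(quiet, "mount warning");
	return 0;
}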
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index f56d30e..cec89dd 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -612,12 +612,13 @@
  *
  * We cannot use an inode here for this - that will push dirty state back up
  * into the VFS and then periodic inode flushing will prevent log covering from
- * making progress. Hence we log a field in the superblock instead.
+ * making progress. Hence we log a field in the superblock instead and use a
+ * synchronous transaction to ensure the superblock is immediately unpinned
+ * and can be written back.
  */
 int
 xfs_fs_log_dummy(
-	xfs_mount_t	*mp,
-	int		flags)
+	xfs_mount_t	*mp)
 {
 	xfs_trans_t	*tp;
 	int		error;
@@ -632,8 +633,7 @@
 
 	/* log the UUID because it is an unchanging field */
 	xfs_mod_sb(tp, XFS_SB_UUID);
-	if (flags & SYNC_WAIT)
-		xfs_trans_set_sync(tp);
+	xfs_trans_set_sync(tp);
 	return xfs_trans_commit(tp, 0);
 }
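With the SYNC_WAIT flag gone, the dummy transaction is unconditionally
synchronous and call sites shrink accordingly. A hypothetical updated caller,
with the surrounding context invented for illustration:

/* was: error = xfs_fs_log_dummy(mp, SYNC_WAIT); */
error = xfs_fs_log_dummy(mp);
if (error)
	return error;	/* superblock already unpinned by the sync commit */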
 
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index a786c52..1b6a98b 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,6 +25,6 @@
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
 				xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags);
+extern int xfs_fs_log_dummy(struct xfs_mount *mp);
 
 #endif	/* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 0bf24b1..ae6fef1f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -377,7 +377,7 @@
 		cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
 	else {
 		cmn_err(CE_NOTE,
-			"!Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
+			"Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
 			mp->m_fsname);
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 	}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 204d8e5..aa0ebb7 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3800,7 +3800,7 @@
 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
 	} else {
 		cmn_err(CE_DEBUG,
-			"!Ending clean XFS mount for filesystem: %s\n",
+			"Ending clean XFS mount for filesystem: %s\n",
 			log->l_mp->m_fsname);
 	}
 	return 0;
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f80a067..33dbc4e 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1137,7 +1137,7 @@
 	if (blkdelta)
 		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
 out:
-	ASSERT(error = 0);
+	ASSERT(error == 0);
 	return;
 }
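The one-character hunk above fixes a classic assignment-for-comparison slip:
ASSERT(error = 0) stores 0 into error and then asserts on the constant 0,
both clobbering the value and inverting the intent. A standalone illustration
of the failure mode (gcc's -Wall/-Wparentheses flags exactly this pattern):

#include <stdio.h>

int main(void)
{
	int error = -5;

	if (error = 0)			/* assignment: yields 0, branch never taken */
		puts("never reached");
	printf("error is now %d\n", error);	/* prints 0: the -5 was lost */
	return 0;
}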
 
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f1eddf7..31b6188 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -87,14 +87,6 @@
 	pmd_clear(mm, address, pmdp);
 	return pmd;
 })
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-				       unsigned long address,
-				       pmd_t *pmdp)
-{
-	BUG();
-	return __pmd(0);
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
@@ -163,9 +155,9 @@
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
-			      unsigned long address,
-			      pmd_t *pmdp);
+extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
+				  unsigned long address,
+				  pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTE_SAME
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index aac27bd..f22e7fe 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -121,6 +121,9 @@
 void drm_fb_helper_restore(void);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+			    uint32_t depth);
+
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
 bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 521a0f8..3111385 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -12,7 +12,6 @@
  *
  * Please credit ARM.com
  * Documentation: ARM DDI 0196D
- *
  */
 
 #ifndef AMBA_PL08X_H
@@ -22,6 +21,15 @@
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 
+struct pl08x_lli;
+struct pl08x_driver_data;
+
+/* Bitmasks for selecting AHB ports for DMA transfers */
+enum {
+	PL08X_AHB1 = (1 << 0),
+	PL08X_AHB2 = (1 << 1)
+};
+
 /**
  * struct pl08x_channel_data - data structure to pass info between
  * platform and PL08x driver regarding channel configuration
@@ -46,8 +54,10 @@
  * @circular_buffer: whether the buffer passed in is circular and
  * shall simply be looped round round (like a record baby round
  * round round round)
- * @single: the device connected to this channel will request single
- * DMA transfers, not bursts. (Bursts are default.)
+ * @single: the device connected to this channel will request single DMA
+ * transfers, not bursts. (Bursts are default.)
+ * @periph_buses: the device connected to this channel is accessible via
+ * these buses (use PL08X_AHB1 | PL08X_AHB2).
  */
 struct pl08x_channel_data {
 	char *bus_id;
@@ -55,10 +65,10 @@
 	int max_signal;
 	u32 muxval;
 	u32 cctl;
-	u32 ccfg;
 	dma_addr_t addr;
 	bool circular_buffer;
 	bool single;
+	u8 periph_buses;
 };
 
 /**
@@ -67,24 +77,23 @@
  * @addr: current address
  * @maxwidth: the maximum width of a transfer on this bus
  * @buswidth: the width of this bus in bytes: 1, 2 or 4
- * @fill_bytes: bytes required to fill to the next bus memory
- * boundary
+ * @fill_bytes: bytes required to fill to the next bus memory boundary
  */
 struct pl08x_bus_data {
 	dma_addr_t addr;
 	u8 maxwidth;
 	u8 buswidth;
-	u32 fill_bytes;
+	size_t fill_bytes;
 };
 
 /**
  * struct pl08x_phy_chan - holder for the physical channels
  * @id: physical index to this channel
  * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this
- * physical channel right now
- * @serving: the virtual channel currently being served by this
- * physical channel
+ * @signal: the physical signal (aka channel) serving this physical channel
+ * right now
+ * @serving: the virtual channel currently being served by this physical
+ * channel
  */
 struct pl08x_phy_chan {
 	unsigned int id;
@@ -92,11 +101,6 @@
 	spinlock_t lock;
 	int signal;
 	struct pl08x_dma_chan *serving;
-	u32 csrc;
-	u32 cdst;
-	u32 clli;
-	u32 cctl;
-	u32 ccfg;
 };
 
 /**
@@ -108,26 +112,23 @@
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
 	enum dma_data_direction	direction;
-	struct pl08x_bus_data srcbus;
-	struct pl08x_bus_data dstbus;
-	int len;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
 	dma_addr_t llis_bus;
-	void *llis_va;
-	struct pl08x_channel_data *cd;
-	bool active;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
 	/*
 	 * Settings to be put into the physical channel when we
-	 * trigger this txd
+	 * trigger this txd.  Other registers are in llis_va[0].
 	 */
-	u32 csrc;
-	u32 cdst;
-	u32 clli;
-	u32 cctl;
+	u32 ccfg;
 };
 
 /**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual
- * channel states
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
  * @PL08X_CHAN_IDLE: the channel is idle
  * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
  * channel and is running a transfer on it
@@ -147,6 +148,8 @@
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @chan: wrappped abstract channel
  * @phychan: the physical channel utilized by this channel, if there is one
+ * @phychan_hold: if non-zero, hold on to the physical channel even if we
+ * have no pending entries
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
@@ -154,53 +157,49 @@
  * @runtime_direction: current direction of this channel according to
  * runtime config
  * @lc: last completed transaction on this channel
- * @desc_list: queued transactions pending on this channel
+ * @pend_list: queued transactions pending on this channel
  * @at: active transaction on this channel
- * @lockflags: sometimes we let a lock last between two function calls,
- * especially prep/submit, and then we need to store the IRQ flags
- * in the channel state, here
  * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
  * @state: whether the channel is idle, paused, running etc
  * @slave: whether this channel is a device (slave) or for memcpy
- * @waiting: a TX descriptor on this channel which is waiting for
- * a physical channel to become available
+ * @waiting: a TX descriptor on this channel which is waiting for a physical
+ * channel to become available
  */
 struct pl08x_dma_chan {
 	struct dma_chan chan;
 	struct pl08x_phy_chan *phychan;
+	int phychan_hold;
 	struct tasklet_struct tasklet;
 	char *name;
 	struct pl08x_channel_data *cd;
 	dma_addr_t runtime_addr;
 	enum dma_data_direction	runtime_direction;
-	atomic_t last_issued;
 	dma_cookie_t lc;
-	struct list_head desc_list;
+	struct list_head pend_list;
 	struct pl08x_txd *at;
-	unsigned long lockflags;
 	spinlock_t lock;
-	void *host;
+	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
 	struct pl08x_txd *waiting;
 };
 
 /**
- * struct pl08x_platform_data - the platform configuration for the
- * PL08x PrimeCells.
+ * struct pl08x_platform_data - the platform configuration for the PL08x
+ * PrimeCells.
  * @slave_channels: the channels defined for the different devices on the
  * platform, all inclusive, including multiplexed channels. The available
- * physical channels will be multiplexed around these signals as they
- * are requested, just enumerate all possible channels.
- * @get_signal: request a physical signal to be used for a DMA
- * transfer immediately: if there is some multiplexing or similar blocking
- * the use of the channel the transfer can be denied by returning
- * less than zero, else it returns the allocated signal number
+ * physical channels will be multiplexed around these signals as they are
+ * requested, just enumerate all possible channels.
+ * @get_signal: request a physical signal to be used for a DMA transfer
+ * immediately: if there is some multiplexing or similar blocking the use
+ * of the channel the transfer can be denied by returning less than zero,
+ * else it returns the allocated signal number
  * @put_signal: indicate to the platform that this physical signal is not
  * running any DMA transfer and multiplexing can be recycled
- * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the
- * LLI addresses are on 0/1 Master 1/2.
+ * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
+ * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
  */
 struct pl08x_platform_data {
 	struct pl08x_channel_data *slave_channels;
@@ -208,6 +207,8 @@
 	struct pl08x_channel_data memcpy_channel;
 	int (*get_signal)(struct pl08x_dma_chan *);
 	void (*put_signal)(struct pl08x_dma_chan *);
+	u8 lli_buses;
+	u8 mem_buses;
 };
 
 #ifdef CONFIG_AMBA_PL08X
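The new periph_buses, lli_buses and mem_buses fields all take the
PL08X_AHB1/PL08X_AHB2 bitmasks defined above. A hypothetical board-file
sketch; the device name and field values are invented for illustration:

static struct pl08x_channel_data board_slave_channels[] = {
	{
		.bus_id		= "uart0_tx",		/* invented device */
		.periph_buses	= PL08X_AHB2,		/* peripheral on AHB2 only */
	},
};

static struct pl08x_platform_data board_pl08x_data = {
	.slave_channels	= board_slave_channels,
	.lli_buses	= PL08X_AHB1,			/* fetch LLIs over AHB1 */
	.mem_buses	= PL08X_AHB1 | PL08X_AHB2,	/* memory visible on both */
};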
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
index 8b49ac4..e02982f 100644
--- a/include/linux/auto_fs4.h
+++ b/include/linux/auto_fs4.h
@@ -24,7 +24,7 @@
 #define AUTOFS_MIN_PROTO_VERSION	3
 #define AUTOFS_MAX_PROTO_VERSION	5
 
-#define AUTOFS_PROTO_SUBVERSION		1
+#define AUTOFS_PROTO_SUBVERSION		2
 
 /* Mask for expire behaviour */
 #define AUTOFS_EXP_IMMEDIATE		1
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 59fcd24..f958c19 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -167,6 +167,8 @@
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *, char *, int);
+	struct vfsmount *(*d_automount)(struct path *);
+	int (*d_manage)(struct dentry *, bool, bool);
 } ____cacheline_aligned;
 
 /*
@@ -205,13 +207,18 @@
 
 #define DCACHE_CANT_MOUNT	0x0100
 #define DCACHE_GENOCIDE		0x0200
-#define DCACHE_MOUNTED		0x0400	/* is a mountpoint */
 
 #define DCACHE_OP_HASH		0x1000
 #define DCACHE_OP_COMPARE	0x2000
 #define DCACHE_OP_REVALIDATE	0x4000
 #define DCACHE_OP_DELETE	0x8000
 
+#define DCACHE_MOUNTED		0x10000	/* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT	0x20000	/* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT	0x40000	/* manage transit from this dirent */
+#define DCACHE_MANAGED_DENTRY \
+	(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
+
 extern seqlock_t rename_lock;
 
 static inline int dname_external(struct dentry *dentry)
@@ -399,7 +406,12 @@
 
 extern void dput(struct dentry *);
 
-static inline int d_mountpoint(struct dentry *dentry)
+static inline bool d_managed(struct dentry *dentry)
+{
+	return dentry->d_flags & DCACHE_MANAGED_DENTRY;
+}
+
+static inline bool d_mountpoint(struct dentry *dentry)
 {
 	return dentry->d_flags & DCACHE_MOUNTED;
 }
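Moving DCACHE_MOUNTED up and adding the two automount flags lets path walking
test all three "managed" conditions with the single DCACHE_MANAGED_DENTRY
mask, which is what the new d_managed() helper does. A hedged sketch of how a
filesystem might arm the automount path; the locking shown is illustrative:

/* Mark a dentry so the VFS will call ->d_automount() when walked over. */
static void mark_automount_point(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
	spin_unlock(&dentry->d_lock);
}

/* Path walk then needs only one test to catch mountpoints, automount
 * points and managed transits alike. */
static bool needs_transit(struct dentry *dentry)
{
	return d_managed(dentry);
}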
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8cd00ad..9bebd7f 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -532,7 +532,7 @@
 	return dmaengine_device_control(chan, DMA_RESUME, 0);
 }
 
-static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
+static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
 {
 	return desc->tx_submit(desc);
 }
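The corrected return type matches what ->tx_submit() actually hands back: a
dma_cookie_t that the client later polls for completion. An illustrative
client fragment, with error handling abbreviated:

dma_cookie_t cookie = dmaengine_submit(desc);
if (dma_submit_error(cookie))
	return -EIO;		/* descriptor was not accepted */
dma_async_issue_pending(chan);	/* actually start the queued transfer */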
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index bec8b82..ab68f78 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -99,6 +99,17 @@
 }
 
 /**
+ * is_unicast_ether_addr - Determine if the Ethernet address is unicast
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is a unicast address.
+ */
+static inline int is_unicast_ether_addr(const u8 *addr)
+{
+	return !is_multicast_ether_addr(addr);
+}
+
+/**
  * is_valid_ether_addr - Determine if the given Ethernet address is valid
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
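The new helper is simply the complement of the multicast test. An
illustrative receive-path fragment accepting only unicast frames addressed
to this interface; the surrounding context is invented:

const u8 *dest = eth_hdr(skb)->h_dest;

if (is_unicast_ether_addr(dest) &&
    !compare_ether_addr(dest, dev->dev_addr))
	netif_rx(skb);	/* unicast and ours: hand it up the stack */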
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index afc00af..a562fa5 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -45,6 +45,7 @@
 #define AT_REMOVEDIR		0x200   /* Remove directory instead of
                                            unlinking file.  */
 #define AT_SYMLINK_FOLLOW	0x400   /* Follow symbolic links.  */
+#define AT_NO_AUTOMOUNT		0x800	/* Suppress terminal automount traversal */
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/file.h b/include/linux/file.h
index b1e1297..e85baeb 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -23,7 +23,7 @@
 
 static inline void fput_light(struct file *file, int fput_needed)
 {
-	if (unlikely(fput_needed))
+	if (fput_needed)
 		fput(file);
 }
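Dropping the unlikely() makes sense because fput_needed is set whenever
fget_light() had to take a real reference, which threaded processes hit
routinely, so the branch is not rare in practice. The usual pairing, shown
as a fragment for reference:

int fput_needed;
struct file *file = fget_light(fd, &fput_needed);

if (!file)
	return -EBADF;
/* ... operate on file ... */
fput_light(file, fput_needed);	/* drops the ref only if one was taken */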
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3984f23..32b38cd 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -242,6 +242,7 @@
 #define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
 #define S_PRIVATE	512	/* Inode is fs-internal */
 #define S_IMA		1024	/* Inode has an associated IMA struct */
+#define S_AUTOMOUNT	2048	/* Automount/referral quasi-directory */
 
 /*
  * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -277,6 +278,7 @@
 #define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
 #define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
 #define IS_IMA(inode)		((inode)->i_flags & S_IMA)
+#define IS_AUTOMOUNT(inode)	((inode)->i_flags & S_AUTOMOUNT)
 
 /* the read-only stuff doesn't really belong here, but any other place is
    probably as bad and I don't want to create yet another include file. */
@@ -666,7 +668,7 @@
 	int			bd_holders;
 	bool			bd_write_holder;
 #ifdef CONFIG_SYSFS
-	struct gendisk *	bd_holder_disk;	/* for sysfs slave linkng */
+	struct list_head	bd_holder_disks;
 #endif
 	struct block_device *	bd_contains;
 	unsigned		bd_block_size;
@@ -1066,7 +1068,6 @@
 	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *);
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
 	int (*fl_change)(struct file_lock **, int);
 };
 
@@ -1482,8 +1483,8 @@
 	unsigned int fi_flags;		/* Flags as passed from user */
 	unsigned int fi_extents_mapped;	/* Number of mapped extents */
 	unsigned int fi_extents_max;	/* Size of fiemap_extent array */
-	struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
-						 * array */
+	struct fiemap_extent __user *fi_extents_start; /* Start of
+							fiemap_extent array */
 };
 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
 			    u64 phys, u64 len, u32 flags);
@@ -1551,6 +1552,8 @@
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
 	int (*setlease)(struct file *, long, struct file_lock **);
+	long (*fallocate)(struct file *file, int mode, loff_t offset,
+			  loff_t len);
 };
 
 #define IPERM_FLAG_RCU	0x0001
@@ -1581,8 +1584,6 @@
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
-	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
-			  loff_t len);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
 } ____cacheline_aligned;
@@ -2058,12 +2059,18 @@
 extern int blkdev_put(struct block_device *bdev, fmode_t mode);
 #ifdef CONFIG_SYSFS
 extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+extern void bd_unlink_disk_holder(struct block_device *bdev,
+				  struct gendisk *disk);
 #else
 static inline int bd_link_disk_holder(struct block_device *bdev,
 				      struct gendisk *disk)
 {
 	return 0;
 }
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+					 struct gendisk *disk)
+{
+}
 #endif
 #endif
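bd_link_disk_holder() now comes with an explicit unlink counterpart instead
of the implicit teardown via the removed bd_holder_disk pointer. A
hypothetical driver pairing, with error handling abbreviated:

err = bd_link_disk_holder(bdev, disk);	/* creates holders/slaves symlinks */
if (err)
	goto out_put_bdev;
/* ... bdev is claimed as a component of disk ... */
bd_unlink_disk_holder(bdev, disk);	/* drop the sysfs links on release */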
 
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 24376fe..8122018d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -165,6 +165,12 @@
 extern void put_page_bootmem(struct page *page);
 #endif
 
+/*
+ * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
+ * notifier will be called under this. 2) offline/online/add/remove memory
+ * will not run simultaneously.
+ */
+
 void lock_memory_hotplug(void);
 void unlock_memory_hotplug(void);
 
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 1869ea2..604f122 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -60,7 +60,7 @@
 	struct super_block *mnt_sb;	/* pointer to superblock */
 #ifdef CONFIG_SMP
 	struct mnt_pcp __percpu *mnt_pcp;
-	atomic_t mnt_longrefs;
+	atomic_t mnt_longterm;		/* how many of the refs are longterm */
 #else
 	int mnt_count;
 	int mnt_writers;
@@ -96,8 +96,6 @@
 extern void mnt_drop_write(struct vfsmount *mnt);
 extern void mntput(struct vfsmount *mnt);
 extern struct vfsmount *mntget(struct vfsmount *mnt);
-extern void mntput_long(struct vfsmount *mnt);
-extern struct vfsmount *mntget_long(struct vfsmount *mnt);
 extern void mnt_pin(struct vfsmount *mnt);
 extern void mnt_unpin(struct vfsmount *mnt);
 extern int __mnt_is_readonly(struct vfsmount *mnt);
@@ -110,12 +108,7 @@
 				      int flags, const char *name,
 				      void *data);
 
-struct nameidata;
-
-struct path;
-extern int do_add_mount(struct vfsmount *newmnt, struct path *path,
-			int mnt_flags, struct list_head *fslist);
-
+extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
 extern void mark_mounts_for_expiry(struct list_head *mounts);
 
 extern dev_t name_to_dev_t(char *name);
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 4dd0c2c..a9baee6 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -527,8 +527,7 @@
 struct cfi_fixup {
 	uint16_t mfr;
 	uint16_t id;
-	void (*fixup)(struct mtd_info *mtd, void* param);
-	void* param;
+	void (*fixup)(struct mtd_info *mtd);
 };
 
 #define CFI_MFR_ANY		0xFFFF
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index 5d25567..6987995 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -16,6 +16,7 @@
 #ifndef __MTD_FSMC_H
 #define __MTD_FSMC_H
 
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
 #include <linux/types.h>
@@ -27,7 +28,7 @@
 
 /*
  * The placement of the Command Latch Enable (CLE) and
- * Address Latch Enable (ALE) is twised around in the
+ * Address Latch Enable (ALE) is twisted around in the
  * SPEAR310 implementation.
  */
 #if defined(CONFIG_MACH_SPEAR310)
@@ -62,7 +63,7 @@
 
 /* ctrl_tim register definitions */
 
-struct fsms_nand_bank_regs {
+struct fsmc_nand_bank_regs {
 	uint32_t pc;
 	uint32_t sts;
 	uint32_t comm;
@@ -78,7 +79,7 @@
 struct fsmc_regs {
 	struct fsmc_nor_bank_regs nor_bank_regs[FSMC_MAX_NOR_BANKS];
 	uint8_t reserved_1[0x40 - 0x20];
-	struct fsms_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
+	struct fsmc_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
 	uint8_t reserved_2[0xfe0 - 0xc0];
 	uint32_t peripid0;			/* 0xfe0 */
 	uint32_t peripid1;			/* 0xfe4 */
@@ -114,25 +115,6 @@
 #define FSMC_THOLD_4		(4 << 16)
 #define FSMC_THIZ_1		(1 << 24)
 
-/* peripid2 register definitions */
-#define FSMC_REVISION_MSK	(0xf)
-#define FSMC_REVISION_SHFT	(0x4)
-
-#define FSMC_VER1		1
-#define FSMC_VER2		2
-#define FSMC_VER3		3
-#define FSMC_VER4		4
-#define FSMC_VER5		5
-#define FSMC_VER6		6
-#define FSMC_VER7		7
-#define FSMC_VER8		8
-
-static inline uint32_t get_fsmc_version(struct fsmc_regs *regs)
-{
-	return (readl(&regs->peripid2) >> FSMC_REVISION_SHFT) &
-				FSMC_REVISION_MSK;
-}
-
 /*
  * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
  * and it has to be read consecutively and immediately after the 512
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index fe8d77e..9d5306b 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -144,6 +144,17 @@
 	 */
 	uint32_t writesize;
 
+	/*
+	 * Size of the write buffer used by the MTD. MTD devices having a write
+	 * buffer can write multiple writesize chunks at a time. E.g. while
+	 * writing 4 * writesize bytes to a device with a 2 * writesize byte
+	 * buffer, the MTD driver may issue 2 * writesize operations, but not
+	 * a single 4 * writesize one. Currently, all NANDs have writebufsize
+	 * equivalent to writesize (NAND page size). Some NOR flashes do have
+	 * writebufsize greater than writesize.
+	 */
+	uint32_t writebufsize;
+
 	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
 	uint32_t oobavail;  // Available OOB bytes per block
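Per the comment above, writebufsize bounds how many writesize chunks a single
program operation may cover. A hypothetical helper a driver or test could use
to size each operation; this is not a kernel API:

static size_t mtd_prog_chunk(const struct mtd_info *mtd, size_t remaining)
{
	/* never exceed the device's write buffer in one operation */
	return remaining < mtd->writebufsize ? remaining : mtd->writebufsize;
}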
 
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 63e17d0..1f489b2 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -448,6 +448,8 @@
  *			See the defines for further explanation.
  * @badblockpos:	[INTERN] position of the bad block marker in the oob
  *			area.
+ * @badblockbits:	[INTERN] number of bits to left-shift the bad block
+ *			number
  * @cellinfo:		[INTERN] MLC/multichip data from chip ident
  * @numchips:		[INTERN] number of physical chips
  * @chipsize:		[INTERN] the size of one chip for multichip arrays
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 0c8815b..ae418e4 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -118,6 +118,8 @@
 	int (*chip_probe)(struct mtd_info *mtd);
 	int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
 	int (*scan_bbt)(struct mtd_info *mtd);
+	int (*enable)(struct mtd_info *mtd);
+	int (*disable)(struct mtd_info *mtd);
 
 	struct completion	complete;
 	int			irq;
@@ -137,6 +139,14 @@
 	void			*bbm;
 
 	void			*priv;
+
+	/*
+	 * Shows that the current operation is composed
+	 * of sequence of commands. For example, cache program.
+	 * Such command status OnGo bit is checked at the end of
+	 * sequence.
+	 */
+	unsigned int		ongoing;
 };
 
 /*
@@ -171,6 +181,9 @@
 #define ONENAND_IS_2PLANE(this)			(0)
 #endif
 
+#define ONENAND_IS_CACHE_PROGRAM(this)					\
+	(this->options & ONENAND_HAS_CACHE_PROGRAM)
+
 /* Check byte access in OneNAND */
 #define ONENAND_CHECK_BYTE_ACCESS(addr)		(addr & 0x1)
 
@@ -181,6 +194,7 @@
 #define ONENAND_HAS_UNLOCK_ALL		(0x0002)
 #define ONENAND_HAS_2PLANE		(0x0004)
 #define ONENAND_HAS_4KB_PAGE		(0x0008)
+#define ONENAND_HAS_CACHE_PROGRAM	(0x0010)
 #define ONENAND_SKIP_UNLOCK_CHECK	(0x0100)
 #define ONENAND_PAGEBUF_ALLOC		(0x1000)
 #define ONENAND_OOBBUF_ALLOC		(0x2000)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 2b54316..4a0a8ba 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -89,7 +89,7 @@
 static inline int mtd_has_cmdlinepart(void) { return 0; }
 #endif
 
-int mtd_is_master(struct mtd_info *mtd);
+int mtd_is_partition(struct mtd_info *mtd);
 int mtd_add_partition(struct mtd_info *master, char *name,
 		      long long offset, long long length);
 int mtd_del_partition(struct mtd_info *master, int partno);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 18d06ad..f276d4f 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -45,6 +45,7 @@
  *  - ending slashes ok even for nonexistent files
  *  - internal "there are more path components" flag
  *  - dentry cache is untrusted; force a real lookup
+ *  - suppress terminal automount
  */
 #define LOOKUP_FOLLOW		0x0001
 #define LOOKUP_DIRECTORY	0x0002
@@ -53,6 +54,7 @@
 #define LOOKUP_PARENT		0x0010
 #define LOOKUP_REVAL		0x0020
 #define LOOKUP_RCU		0x0040
+#define LOOKUP_NO_AUTOMOUNT	0x0080
 /*
  * Intent data
  */
@@ -79,7 +81,8 @@
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 
-extern int follow_down(struct path *);
+extern int follow_down_one(struct path *);
+extern int follow_down(struct path *, bool);
 extern int follow_up(struct path *);
 
 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index be4957c..d971346 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -520,9 +520,6 @@
 	 * please use this field instead of dev->trans_start
 	 */
 	unsigned long		trans_start;
-	u64			tx_bytes;
-	u64			tx_packets;
-	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -2265,8 +2262,6 @@
 extern void		dev_mcast_init(void);
 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 					       struct rtnl_link_stats64 *storage);
-extern void		dev_txq_stats_fold(const struct net_device *dev,
-					   struct rtnl_link_stats64 *stats);
 
 extern int		netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 9b46300..134716e 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -65,6 +65,9 @@
 
 #define NFS4_CDFC4_FORE	0x1
 #define NFS4_CDFC4_BACK 0x2
+#define NFS4_CDFC4_BOTH 0x3
+#define NFS4_CDFC4_FORE_OR_BOTH 0x3
+#define NFS4_CDFC4_BACK_OR_BOTH 0x7
 
 #define NFS4_SET_TO_SERVER_TIME	0
 #define NFS4_SET_TO_CLIENT_TIME	1
@@ -140,6 +143,9 @@
 #define SEQ4_STATUS_CB_PATH_DOWN_SESSION	0x00000200
 #define SEQ4_STATUS_BACKCHANNEL_FAULT		0x00000400
 
+#define NFS4_SECINFO_STYLE4_CURRENT_FH	0
+#define NFS4_SECINFO_STYLE4_PARENT	1
+
 #define NFS4_MAX_UINT64	(~(u64)0)
 
 /* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 0779bb8..6023efa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -215,7 +215,6 @@
 #define NFS_INO_ADVISE_RDPLUS	(0)		/* advise readdirplus */
 #define NFS_INO_STALE		(1)		/* possible stale inode */
 #define NFS_INO_ACL_LRU_SET	(2)		/* Inode is on the LRU list */
-#define NFS_INO_MOUNTPOINT	(3)		/* inode is remote mountpoint */
 #define NFS_INO_FLUSHING	(4)		/* inode is flushing out data */
 #define NFS_INO_FSCACHE		(5)		/* inode can be cached by FS-Cache */
 #define NFS_INO_FSCACHE_LOCK	(6)		/* FS-Cache cookie management lock */
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 8ae78a6..bd31615 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -35,7 +35,7 @@
 #define NFSEXP_NOHIDE		0x0200
 #define NFSEXP_NOSUBTREECHECK	0x0400
 #define	NFSEXP_NOAUTHNLM	0x0800		/* Don't authenticate NLM requests - just trust */
-#define NFSEXP_MSNFS		0x1000	/* do silly things that MS clients expect */
+#define NFSEXP_MSNFS		0x1000	/* do silly things that MS clients expect; no longer supported */
 #define NFSEXP_FSID		0x2000
 #define	NFSEXP_CROSSMOUNT	0x4000
 #define	NFSEXP_NOACL		0x8000	/* reserved for possible ACL related use */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2b89b71..821ffb9 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -148,6 +148,10 @@
  * @NL80211_CMD_SET_MPATH:  Set mesh path attributes for mesh path to
  * 	destination %NL80211_ATTR_MAC on the interface identified by
  * 	%NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_NEW_MPATH: Create a new mesh path for the destination given by
+ *	%NL80211_ATTR_MAC via %NL80211_ATTR_MPATH_NEXT_HOP.
+ * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by
+ *	%NL80211_ATTR_MAC.
  * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the
  *	the interface identified by %NL80211_ATTR_IFINDEX.
  * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
@@ -612,7 +616,7 @@
  *	consisting of a nested array.
  *
  * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes).
- * @NL80211_ATTR_PLINK_ACTION: action to perform on the mesh peer link.
+ * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link.
  * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path.
  * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path
  * 	info given for %NL80211_CMD_GET_MPATH, nested attribute described at
@@ -879,7 +883,9 @@
  *	See &enum nl80211_key_default_types.
  *
  * @NL80211_ATTR_MESH_SETUP: Optional mesh setup parameters.  These cannot be
- * changed once the mesh is active.
+ *	changed once the mesh is active.
+ * @NL80211_ATTR_MESH_CONFIG: Mesh configuration parameters, a nested attribute
+ *	containing attributes from &enum nl80211_meshconf_params.
  *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -1225,8 +1231,6 @@
  * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs)
  * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station)
  * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
- * @__NL80211_STA_INFO_AFTER_LAST: internal
- * @NL80211_STA_INFO_MAX: highest possible station info attribute
  * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
  * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
  * 	containing info as possible, see &enum nl80211_sta_info_txrate.
@@ -1236,6 +1240,11 @@
  * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station)
  * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station)
  * @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm)
+ * @NL80211_STA_INFO_LLID: the station's mesh LLID
+ * @NL80211_STA_INFO_PLID: the station's mesh PLID
+ * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
+ * @__NL80211_STA_INFO_AFTER_LAST: internal
+ * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
 enum nl80211_sta_info {
 	__NL80211_STA_INFO_INVALID,
@@ -1626,7 +1635,7 @@
  * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
  * that it takes for an HWMP information element to propagate across the mesh
  *
- * @NL80211_MESHCONF_ROOTMODE: whether root mode is enabled or not
+ * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not
  *
  * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a
  * source mesh point for path selection elements.
@@ -1678,6 +1687,7 @@
  * element that vendors will use to identify the path selection methods and
  * metrics in use.
  *
+ * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
  * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
  */
 enum nl80211_mesh_setup_params {
diff --git a/include/linux/path.h b/include/linux/path.h
index a581e8c..edc98de 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -10,9 +10,7 @@
 };
 
 extern void path_get(struct path *);
-extern void path_get_long(struct path *);
 extern void path_put(struct path *);
-extern void path_put_long(struct path *);
 
 static inline int path_equal(const struct path *path1, const struct path *path2)
 {
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 479d9bb..4462350 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -35,9 +35,6 @@
 	return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
 					      pbus->number);
 }
-#else
-static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
-{ return NULL; }
 #endif
 
 #ifdef CONFIG_ACPI_APEI
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9e67dcd..559d028 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -993,8 +993,13 @@
 extern int pci_msi_enabled(void);
 #endif
 
+#ifdef CONFIG_PCIEPORTBUS
 extern bool pcie_ports_disabled;
 extern bool pcie_ports_auto;
+#else
+#define pcie_ports_disabled	true
+#define pcie_ports_auto		false
+#endif
 
 #ifndef CONFIG_PCIEASPM
 static inline int pcie_aspm_enabled(void)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 20ec0a64..bf221d6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,6 +255,11 @@
 typedef unsigned char *sk_buff_data_t;
 #endif
 
+#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
+    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
+#endif
+
 /** 
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
@@ -362,6 +367,8 @@
 	void			(*destructor)(struct sk_buff *skb);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	struct sk_buff		*nfct_reasm;
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -2057,6 +2064,8 @@
 	if (nfct)
 		atomic_inc(&nfct->use);
 }
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
 {
 	if (skb)
@@ -2085,6 +2094,8 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(skb->nfct_reasm);
 	skb->nfct_reasm = NULL;
 #endif
@@ -2101,6 +2112,8 @@
 	dst->nfct = src->nfct;
 	nf_conntrack_get(src->nfct);
 	dst->nfctinfo = src->nfctinfo;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	dst->nfct_reasm = src->nfct_reasm;
 	nf_conntrack_get_reasm(src->nfct_reasm);
 #endif
@@ -2114,6 +2127,8 @@
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(dst->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(dst->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 78aa104..7898ea1 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -256,10 +256,13 @@
 	return rv - boot.tv_sec;
 }
 
+#ifdef CONFIG_NFSD_DEPRECATED
 static inline void sunrpc_invalidate(struct cache_head *h,
 				     struct cache_detail *detail)
 {
 	h->expiry_time = seconds_since_boot() - 1;
 	detail->nextcheck = seconds_since_boot();
 }
+#endif /* CONFIG_NFSD_DEPRECATED */
+
 #endif /*  _LINUX_SUNRPC_CACHE_H_ */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index c81d4d8..ea29330 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -269,6 +269,7 @@
 	struct cache_req	rq_chandle;	/* handle passed to caches for 
 						 * request delaying 
 						 */
+	bool			rq_dropme;
 	/* Catering to nfsd */
 	struct auth_domain *	rq_client;	/* RPC peer info */
 	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 357da5e..059877b 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -63,7 +63,6 @@
 #define XPT_LISTENER	11		/* listening endpoint */
 #define XPT_CACHE_AUTH	12		/* cache auth info */
 
-	struct svc_pool		*xpt_pool;	/* current pool iff queued */
 	struct svc_serv		*xpt_server;	/* service for transport */
 	atomic_t    	    	xpt_reserved;	/* space on outq that is rsvd */
 	struct mutex		xpt_mutex;	/* to serialize sending data */
@@ -81,6 +80,7 @@
 	void			*xpt_bc_sid;	/* back channel session ID */
 
 	struct net		*xpt_net;
+	struct rpc_xprt		*xpt_bc_xprt;	/* NFSv4.1 backchannel */
 };
 
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 1b353a7..04dba23 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,7 +28,6 @@
 	/* private TCP part */
 	u32			sk_reclen;	/* length of record */
 	u32			sk_tcplen;	/* current read length */
-	struct rpc_xprt		*sk_bc_xprt;	/* NFSv4.1 backchannel xprt */
 };
 
 /*
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 89d10d2..bef0f53 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -321,6 +321,7 @@
 #define XPRT_CLOSING		(6)
 #define XPRT_CONNECTION_ABORT	(7)
 #define XPRT_CONNECTION_CLOSE	(8)
+#define XPRT_INITIALIZED	(9)
 
 static inline void xprt_set_connected(struct rpc_xprt *xprt)
 {
diff --git a/include/net/ah.h b/include/net/ah.h
index be7798d..ca95b98 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -4,7 +4,7 @@
 #include <linux/skbuff.h>
 
 /* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN	16
+#define MAX_AH_AUTH_LEN	64
 
 struct crypto_ahash;
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bcc9f44..1322695 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1103,6 +1103,8 @@
  * @change_mpath: change a given mesh path
  * @get_mpath: get a mesh path for the given parameters
  * @dump_mpath: dump mesh path callback -- resume dump at index @idx
+ * @join_mesh: join the mesh network with the specified parameters
+ * @leave_mesh: leave the current mesh network
  *
  * @get_mesh_config: Get the current mesh configuration
  *
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5b3fd5a..62c0ce2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -337,6 +337,10 @@
  * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
  * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
  *	frame and selects the maximum number of streams that it can use.
+ * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on
+ *	the off-channel channel when a remain-on-channel offload is done
+ *	in hardware -- normal packets still flow and are expected to be
+ *	handled properly by the device.
  *
  * Note: If you have to add new flags to the enumeration, then don't
  *	 forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -1753,6 +1757,16 @@
  *	(also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
  *
  * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @remain_on_channel: Starts an off-channel period on the given channel, must
+ *	call back to ieee80211_ready_on_channel() when on that channel. Note
+ *	that normal channel traffic is not stopped as this is intended for hw
+ *	offload. Frames to transmit on the off-channel channel are transmitted
+ *	normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the
+ *	duration (which will always be non-zero) expires, the driver must call
+ *	ieee80211_remain_on_channel_expired(). This callback may sleep.
+ * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
+ *	aborted before it expires. This callback may sleep.
  */
 struct ieee80211_ops {
 	int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 1ee717e..a4c9936 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -7,16 +7,6 @@
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-			       struct net_device *in,
-			       struct net_device *out,
-			       int (*okfn)(struct sk_buff *));
-
-struct inet_frags_ctl;
-
 #include <linux/sysctl.h>
 extern struct ctl_table nf_ct_ipv6_sysctl_table[];
 
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 94dd54d..fd79c9a 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -3,4 +3,14 @@
 
 extern void nf_defrag_ipv6_enable(void);
 
+extern int nf_ct_frag6_init(void);
+extern void nf_ct_frag6_cleanup(void);
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+			       struct net_device *in,
+			       struct net_device *out,
+			       int (*okfn)(struct sk_buff *));
+
+struct inet_frags_ctl;
+
 #endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/red.h b/include/net/red.h
index 995108e..3319f16 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -97,7 +97,6 @@
 	u32		forced_mark;	/* Forced marks, qavg > max_thresh */
 	u32		pdrop;          /* Drops due to queue limits */
 	u32		other;          /* Drops due to drop() calls */
-	u32		backlog;
 };
 
 struct red_parms {
diff --git a/include/target/configfs_macros.h b/include/target/configfs_macros.h
new file mode 100644
index 0000000..7fe7460
--- /dev/null
+++ b/include/target/configfs_macros.h
@@ -0,0 +1,147 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs_macros.h - extends macros for configfs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Based on sysfs:
+ * 	sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * Based on kobject.h:
+ *      Copyright (c) 2002-2003	Patrick Mochel
+ *      Copyright (c) 2002-2003	Open Source Development Labs
+ *
+ * configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * Added CONFIGFS_EATTR() macros from original configfs.h macros
+ * Copyright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * Please read Documentation/filesystems/configfs.txt before using the
+ * configfs interface, ESPECIALLY the parts about reference counts and
+ * item destructors.
+ */
+
+#ifndef _CONFIGFS_MACROS_H_
+#define _CONFIGFS_MACROS_H_
+
+#include <linux/configfs.h>
+
+/*
+ * Users often need to create attribute structures for their configurable
+ * attributes, containing a configfs_attribute member and function pointers
+ * for the show() and store() operations on that attribute. If they don't
+ * need anything else on the extended attribute structure, they can use
+ * this macro to define it.  The argument _name ends up as
+ * 'struct _name_attribute', and in the names generated by CONFIGFS_EATTR_OPS() below.
+ * The argument _item is the name of the structure containing the
+ * struct config_item or struct config_group structure members
+ */
+#define CONFIGFS_EATTR_STRUCT(_name, _item)				\
+struct _name##_attribute {						\
+	struct configfs_attribute attr;					\
+	ssize_t (*show)(struct _item *, char *);			\
+	ssize_t (*store)(struct _item *, const char *, size_t);		\
+}
+
+/*
+ * With the extended attribute structure, users can use this macro
+ * (similar to sysfs' __ATTR) to make defining attributes easier.
+ * An example:
+ * #define MYITEM_EATTR(_name, _mode, _show, _store)	\
+ * struct myitem_attribute childless_attr_##_name =	\
+ *         __CONFIGFS_EATTR(_name, _mode, _show, _store)
+ */
+#define __CONFIGFS_EATTR(_name, _mode, _show, _store)			\
+{									\
+	.attr	= {							\
+			.ca_name = __stringify(_name),			\
+			.ca_mode = _mode,				\
+			.ca_owner = THIS_MODULE,			\
+	},								\
+	.show	= _show,						\
+	.store	= _store,						\
+}
+/* Here is a readonly version, only requiring a show() operation */
+#define __CONFIGFS_EATTR_RO(_name, _show)				\
+{									\
+	.attr	= {							\
+			.ca_name = __stringify(_name),			\
+			.ca_mode = 0444,				\
+			.ca_owner = THIS_MODULE,			\
+	},								\
+	.show	= _show,						\
+}
+
+/*
+ * With these extended attributes, the simple show_attribute() and
+ * store_attribute() operations need to call the show() and store() of the
+ * attributes.  This is a common pattern, so we provide a macro to define
+ * them.  The argument _name is the name of the attribute defined by
+ * CONFIGFS_EATTR_STRUCT(). The argument _item is the name of the structure
+ * containing the struct config_item or struct config_group structure member.
+ * The argument _item_member is the actual name of the struct config_* struct
+ * in your _item structure.  Meaning  my_structure->some_config_group.
+ *		                      ^^_item^^^^^  ^^_item_member^^^
+ * This macro expects the attributes to be named "struct <name>_attribute".
+ */
+#define CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member)		\
+static struct _item *to_##_name(struct config_item *ci)			\
+{									\
+	return (ci) ? container_of(to_config_group(ci), struct _item,	\
+		_item_member) : NULL;					\
+}
+
+#define CONFIGFS_EATTR_OPS_SHOW(_name, _item)				\
+static ssize_t _name##_attr_show(struct config_item *item,		\
+				 struct configfs_attribute *attr,	\
+				 char *page)				\
+{									\
+	struct _item *_item = to_##_name(item);				\
+	struct _name##_attribute * _name##_attr =			\
+		container_of(attr, struct _name##_attribute, attr);	\
+	ssize_t ret = 0;						\
+									\
+	if (_name##_attr->show)						\
+		ret = _name##_attr->show(_item, page);			\
+	return ret;							\
+}
+
+#define CONFIGFS_EATTR_OPS_STORE(_name, _item)				\
+static ssize_t _name##_attr_store(struct config_item *item,		\
+				  struct configfs_attribute *attr,	\
+				  const char *page, size_t count)	\
+{									\
+	struct _item *_item = to_##_name(item);				\
+	struct _name##_attribute * _name##_attr =			\
+		container_of(attr, struct _name##_attribute, attr);	\
+	ssize_t ret = -EINVAL;						\
+									\
+	if (_name##_attr->store)					\
+		ret = _name##_attr->store(_item, page, count);		\
+	return ret;							\
+}
+
+#define CONFIGFS_EATTR_OPS(_name, _item, _item_member)			\
+	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
+	CONFIGFS_EATTR_OPS_SHOW(_name, _item);				\
+	CONFIGFS_EATTR_OPS_STORE(_name, _item);
+
+#define CONFIGFS_EATTR_OPS_RO(_name, _item, _item_member)		\
+	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
+	CONFIGFS_EATTR_OPS_SHOW(_name, _item);
+
+#endif /* _CONFIGFS_MACROS_H_ */
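Following the usage the comments above describe, a hypothetical consumer
would stamp out the attribute structure, the to_*() converter and the
show/store thunks like this; the item type and attribute are invented:

struct my_item {
	struct config_group group;
	int value;
};

CONFIGFS_EATTR_STRUCT(my_item, my_item);
CONFIGFS_EATTR_OPS(my_item, my_item, group);

static ssize_t my_item_show_value(struct my_item *it, char *page)
{
	return sprintf(page, "%d\n", it->value);
}

static struct my_item_attribute my_item_attr_value =
	__CONFIGFS_EATTR_RO(value, my_item_show_value);

The generated my_item_attr_show()/my_item_attr_store() thunks are what would
then be wired into the item's struct configfs_item_operations.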
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
new file mode 100644
index 0000000..07fdfb6
--- /dev/null
+++ b/include/target/target_core_base.h
@@ -0,0 +1,937 @@
+#ifndef TARGET_CORE_BASE_H
+#define TARGET_CORE_BASE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_cmnd.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include "target_core_mib.h"
+
+#define TARGET_CORE_MOD_VERSION		"v4.0.0-rc6"
+#define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
+
+/* Used by transport_generic_allocate_iovecs() */
+#define TRANSPORT_IOV_DATA_BUFFER		5
+/* Maximum Number of LUNs per Target Portal Group */
+#define TRANSPORT_MAX_LUNS_PER_TPG		256
+/*
+ * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
+ *
+ * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
+ * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
+ * 16-byte CDBs by default and require an extra allocation for
+ * 32-byte CDBs because of legacy issues.
+ *
+ * Within TCM Core there are no such legacy limitations, so we go ahead
+ * and use 32-byte CDBs by default with include/scsi/scsi.h:scsi_command_size()
+ * within all TCM Core and subsystem plugin code.
+ */
+#define TCM_MAX_COMMAND_SIZE			32
+/*
+ * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
+ * defined 96, but the real limit is 252 (or 260 including the header)
+ */
+#define TRANSPORT_SENSE_BUFFER			SCSI_SENSE_BUFFERSIZE
+/* Used by transport_send_check_condition_and_sense() */
+#define SPC_SENSE_KEY_OFFSET			2
+#define SPC_ASC_KEY_OFFSET			12
+#define SPC_ASCQ_KEY_OFFSET			13
+#define TRANSPORT_IQN_LEN			224
+/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
+#define LU_GROUP_NAME_BUF			256
+/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
+#define TG_PT_GROUP_NAME_BUF			256
+/* Used to parse VPD into struct t10_vpd */
+#define VPD_TMP_BUF_SIZE			128
+/* Used by transport_generic_cmd_sequencer() */
+#define READ_BLOCK_LEN          		6
+#define READ_CAP_LEN            		8
+#define READ_POSITION_LEN       		20
+#define INQUIRY_LEN				36
+/* Used by transport_get_inquiry_vpd_serial() */
+#define INQUIRY_VPD_SERIAL_LEN			254
+/* Used by transport_get_inquiry_vpd_device_ident() */
+#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254
+
+/* struct se_hba->hba_flags */
+enum hba_flags_table {
+	HBA_FLAGS_INTERNAL_USE	= 0x01,
+	HBA_FLAGS_PSCSI_MODE	= 0x02,
+};
+
+/* struct se_lun->lun_status */
+enum transport_lun_status_table {
+	TRANSPORT_LUN_STATUS_FREE = 0,
+	TRANSPORT_LUN_STATUS_ACTIVE = 1,
+};
+
+/* struct se_portal_group->se_tpg_type */
+enum transport_tpg_type_table {
+	TRANSPORT_TPG_TYPE_NORMAL = 0,
+	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
+};
+
+/* Used for generate timer flags */
+enum timer_flags_table {
+	TF_RUNNING	= 0x01,
+	TF_STOP		= 0x02,
+};
+
+/* Special transport agnostic struct se_cmd->t_states */
+enum transport_state_table {
+	TRANSPORT_NO_STATE	= 0,
+	TRANSPORT_NEW_CMD	= 1,
+	TRANSPORT_DEFERRED_CMD	= 2,
+	TRANSPORT_WRITE_PENDING	= 3,
+	TRANSPORT_PROCESS_WRITE	= 4,
+	TRANSPORT_PROCESSING	= 5,
+	TRANSPORT_COMPLETE_OK	= 6,
+	TRANSPORT_COMPLETE_FAILURE = 7,
+	TRANSPORT_COMPLETE_TIMEOUT = 8,
+	TRANSPORT_PROCESS_TMR	= 9,
+	TRANSPORT_TMR_COMPLETE	= 10,
+	TRANSPORT_ISTATE_PROCESSING = 11,
+	TRANSPORT_ISTATE_PROCESSED = 12,
+	TRANSPORT_KILL		= 13,
+	TRANSPORT_REMOVE	= 14,
+	TRANSPORT_FREE		= 15,
+	TRANSPORT_NEW_CMD_MAP	= 16,
+};
+
+/* Used for struct se_cmd->se_cmd_flags */
+enum se_cmd_flags_table {
+	SCF_SUPPORTED_SAM_OPCODE	= 0x00000001,
+	SCF_TRANSPORT_TASK_SENSE	= 0x00000002,
+	SCF_EMULATED_TASK_SENSE		= 0x00000004,
+	SCF_SCSI_DATA_SG_IO_CDB		= 0x00000008,
+	SCF_SCSI_CONTROL_SG_IO_CDB	= 0x00000010,
+	SCF_SCSI_CONTROL_NONSG_IO_CDB	= 0x00000020,
+	SCF_SCSI_NON_DATA_CDB		= 0x00000040,
+	SCF_SCSI_CDB_EXCEPTION		= 0x00000080,
+	SCF_SCSI_RESERVATION_CONFLICT	= 0x00000100,
+	SCF_CMD_PASSTHROUGH_NOALLOC	= 0x00000200,
+	SCF_SE_CMD_FAILED		= 0x00000400,
+	SCF_SE_LUN_CMD			= 0x00000800,
+	SCF_SE_ALLOW_EOO		= 0x00001000,
+	SCF_SE_DISABLE_ONLINE_CHECK	= 0x00002000,
+	SCF_SENT_CHECK_CONDITION	= 0x00004000,
+	SCF_OVERFLOW_BIT		= 0x00008000,
+	SCF_UNDERFLOW_BIT		= 0x00010000,
+	SCF_SENT_DELAYED_TAS		= 0x00020000,
+	SCF_ALUA_NON_OPTIMIZED		= 0x00040000,
+	SCF_DELAYED_CMD_FROM_SAM_ATTR	= 0x00080000,
+	SCF_PASSTHROUGH_SG_TO_MEM	= 0x00100000,
+	SCF_PASSTHROUGH_CONTIG_TO_SG	= 0x00200000,
+	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
+	SCF_EMULATE_SYNC_CACHE		= 0x00800000,
+	SCF_EMULATE_CDB_ASYNC		= 0x01000000,
+	SCF_EMULATE_SYNC_UNMAP		= 0x02000000
+};
+
+/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
+enum transport_lunflags_table {
+	TRANSPORT_LUNFLAGS_NO_ACCESS		= 0x00,
+	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS	= 0x01,
+	TRANSPORT_LUNFLAGS_READ_ONLY		= 0x02,
+	TRANSPORT_LUNFLAGS_READ_WRITE		= 0x04,
+};
+
+/* struct se_device->dev_status */
+enum transport_device_status_table {
+	TRANSPORT_DEVICE_ACTIVATED		= 0x01,
+	TRANSPORT_DEVICE_DEACTIVATED		= 0x02,
+	TRANSPORT_DEVICE_QUEUE_FULL		= 0x04,
+	TRANSPORT_DEVICE_SHUTDOWN		= 0x08,
+	TRANSPORT_DEVICE_OFFLINE_ACTIVATED	= 0x10,
+	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED	= 0x20,
+};
+
+/*
+ * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
+ * to signal which ASC/ASCQ sense payload should be built.
+ */
+enum tcm_sense_reason_table {
+	TCM_NON_EXISTENT_LUN			= 0x01,
+	TCM_UNSUPPORTED_SCSI_OPCODE		= 0x02,
+	TCM_INCORRECT_AMOUNT_OF_DATA		= 0x03,
+	TCM_UNEXPECTED_UNSOLICITED_DATA		= 0x04,
+	TCM_SERVICE_CRC_ERROR			= 0x05,
+	TCM_SNACK_REJECTED			= 0x06,
+	TCM_SECTOR_COUNT_TOO_MANY		= 0x07,
+	TCM_INVALID_CDB_FIELD			= 0x08,
+	TCM_INVALID_PARAMETER_LIST		= 0x09,
+	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= 0x0a,
+	TCM_UNKNOWN_MODE_PAGE			= 0x0b,
+	TCM_WRITE_PROTECTED			= 0x0c,
+	TCM_CHECK_CONDITION_ABORT_CMD		= 0x0d,
+	TCM_CHECK_CONDITION_UNIT_ATTENTION	= 0x0e,
+	TCM_CHECK_CONDITION_NOT_READY		= 0x0f,
+};
+
+struct se_obj {
+	atomic_t obj_access_count;
+} ____cacheline_aligned;
+
+/*
+ * Used by TCM Core internally to signal if ALUA emulation is enabled or
+ * disabled, or running in TCM/pSCSI passthrough mode
+ */
+typedef enum {
+	SPC_ALUA_PASSTHROUGH,
+	SPC2_ALUA_DISABLED,
+	SPC3_ALUA_EMULATED
+} t10_alua_index_t;
+
+/*
+ * Used by TCM Core internally to signal if SAM Task Attribute emulation
+ * is enabled or disabled, or running in TCM/pSCSI passthrough mode
+ */
+typedef enum {
+	SAM_TASK_ATTR_PASSTHROUGH,
+	SAM_TASK_ATTR_UNTAGGED,
+	SAM_TASK_ATTR_EMULATED
+} t10_task_attr_index_t;
+
+struct se_cmd;
+
+struct t10_alua {
+	t10_alua_index_t alua_type;
+	/* ALUA Target Port Group ID */
+	u16	alua_tg_pt_gps_counter;
+	u32	alua_tg_pt_gps_count;
+	spinlock_t tg_pt_gps_lock;
+	struct se_subsystem_dev *t10_sub_dev;
+	/* Used for default ALUA Target Port Group */
+	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
+	/* Used for default ALUA Target Port Group ConfigFS group */
+	struct config_group alua_tg_pt_gps_group;
+	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
+	struct list_head tg_pt_gps_list;
+} ____cacheline_aligned;
+
+struct t10_alua_lu_gp {
+	u16	lu_gp_id;
+	int	lu_gp_valid_id;
+	u32	lu_gp_members;
+	atomic_t lu_gp_shutdown;
+	atomic_t lu_gp_ref_cnt;
+	spinlock_t lu_gp_lock;
+	struct config_group lu_gp_group;
+	struct list_head lu_gp_list;
+	struct list_head lu_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_lu_gp_member {
+	int lu_gp_assoc:1;
+	atomic_t lu_gp_mem_ref_cnt;
+	spinlock_t lu_gp_mem_lock;
+	struct t10_alua_lu_gp *lu_gp;
+	struct se_device *lu_gp_mem_dev;
+	struct list_head lu_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_tg_pt_gp {
+	u16	tg_pt_gp_id;
+	int	tg_pt_gp_valid_id;
+	int	tg_pt_gp_alua_access_status;
+	int	tg_pt_gp_alua_access_type;
+	int	tg_pt_gp_nonop_delay_msecs;
+	int	tg_pt_gp_trans_delay_msecs;
+	int	tg_pt_gp_pref;
+	int	tg_pt_gp_write_metadata;
+	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
+#define ALUA_MD_BUF_LEN				1024
+	u32	tg_pt_gp_md_buf_len;
+	u32	tg_pt_gp_members;
+	atomic_t tg_pt_gp_alua_access_state;
+	atomic_t tg_pt_gp_ref_cnt;
+	spinlock_t tg_pt_gp_lock;
+	struct mutex tg_pt_gp_md_mutex;
+	struct se_subsystem_dev *tg_pt_gp_su_dev;
+	struct config_group tg_pt_gp_group;
+	struct list_head tg_pt_gp_list;
+	struct list_head tg_pt_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_tg_pt_gp_member {
+	int tg_pt_gp_assoc:1;
+	atomic_t tg_pt_gp_mem_ref_cnt;
+	spinlock_t tg_pt_gp_mem_lock;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_port *tg_pt;
+	struct list_head tg_pt_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_vpd {
+	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
+	int protocol_identifier_set;
+	u32 protocol_identifier;
+	u32 device_identifier_code_set;
+	u32 association;
+	u32 device_identifier_type;
+	struct list_head vpd_list;
+} ____cacheline_aligned;
+
+struct t10_wwn {
+	unsigned char vendor[8];
+	unsigned char model[16];
+	unsigned char revision[4];
+	unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+	spinlock_t t10_vpd_lock;
+	struct se_subsystem_dev *t10_sub_dev;
+	struct config_group t10_wwn_group;
+	struct list_head t10_vpd_list;
+} ____cacheline_aligned;
+
+
+/*
+ * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
+ * emulation is enabled or disabled, or running in TCM/pSCSI passthrough
+ * mode
+ */
+typedef enum {
+	SPC_PASSTHROUGH,
+	SPC2_RESERVATIONS,
+	SPC3_PERSISTENT_RESERVATIONS
+} t10_reservations_index_t;
+
+struct t10_pr_registration {
+	/* Used for fabrics that contain WWN+ISID */
+#define PR_REG_ISID_LEN				16
+	/* PR_REG_ISID_LEN + ',i,0x' */
+#define PR_REG_ISID_ID_LEN			(PR_REG_ISID_LEN + 5)
+	char pr_reg_isid[PR_REG_ISID_LEN];
+	/* Used during APTPL metadata reading */
+#define PR_APTPL_MAX_IPORT_LEN			256
+	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
+	/* Used during APTPL metadata reading */
+#define PR_APTPL_MAX_TPORT_LEN			256
+	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
+	/* For writing out live meta data */
+	unsigned char *pr_aptpl_buf;
+	u16 pr_aptpl_rpti;
+	u16 pr_reg_tpgt;
+	/* Reservation affects all target ports */
+	int pr_reg_all_tg_pt;
+	/* Activate Persistence across Target Power Loss */
+	int pr_reg_aptpl;
+	int pr_res_holder;
+	int pr_res_type;
+	int pr_res_scope;
+	/* Used for fabric initiator WWPNs using an ISID */
+	int isid_present_at_reg:1;
+	u32 pr_res_mapped_lun;
+	u32 pr_aptpl_target_lun;
+	u32 pr_res_generation;
+	u64 pr_reg_bin_isid;
+	u64 pr_res_key;
+	atomic_t pr_res_holders;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_dev_entry *pr_reg_deve;
+	struct se_lun *pr_reg_tg_pt_lun;
+	struct list_head pr_reg_list;
+	struct list_head pr_reg_abort_list;
+	struct list_head pr_reg_aptpl_list;
+	struct list_head pr_reg_atp_list;
+	struct list_head pr_reg_atp_mem_list;
+} ____cacheline_aligned;
+
+/*
+ * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
+ * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
+ * core_setup_reservations()
+ */
+struct t10_reservation_ops {
+	int (*t10_reservation_check)(struct se_cmd *, u32 *);
+	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
+	int (*t10_pr_register)(struct se_cmd *);
+	int (*t10_pr_clear)(struct se_cmd *);
+};
+
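+/*
+ * Hypothetical sketch of the selection described above; the real
+ * decision is made in drivers/target/target_core_pr.c:
+ * core_setup_reservations(), and the tcm_example_* names are
+ * assumptions.
+ */
+static inline int tcm_example_pr_check(struct se_cmd *cmd, u32 *flags)
+{
+	return 0;	/* stand-in for a real SPC-3 reservation check */
+}
+
+static inline void tcm_example_pick_pr_ops(struct t10_reservation_ops *ops,
+	int passthrough)
+{
+	/* pSCSI passthrough leaves reservations to the underlying device */
+	ops->t10_reservation_check = passthrough ? NULL : tcm_example_pr_check;
+}
+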
+struct t10_reservation_template {
+	/* Reservation affects all target ports */
+	int pr_all_tg_pt;
+	/* Activate Persistence across Target Power Loss enabled
+	 * for SCSI device */
+	int pr_aptpl_active;
+	/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
+#define PR_APTPL_BUF_LEN			8192
+	u32 pr_aptpl_buf_len;
+	u32 pr_generation;
+	t10_reservations_index_t res_type;
+	spinlock_t registration_lock;
+	spinlock_t aptpl_reg_lock;
+	/*
+	 * This will always be set by one individual I_T Nexus.
+	 * However, with all_tg_pt=1, other I_T Nexuses from the
+	 * same initiator can access PR reg/res info on a different
+	 * target port.
+	 *
+	 * There is also the 'All Registrants' case, where there is
+	 * a single *pr_res_holder of the reservation, but all
+	 * registrations are considered reservation holders.
+	 */
+	struct se_node_acl *pr_res_holder;
+	struct list_head registration_list;
+	struct list_head aptpl_reg_list;
+	struct t10_reservation_ops pr_ops;
+} ____cacheline_aligned;
+
+struct se_queue_req {
+	int			state;
+	void			*cmd;
+	struct list_head	qr_list;
+} ____cacheline_aligned;
+
+struct se_queue_obj {
+	atomic_t		queue_cnt;
+	spinlock_t		cmd_queue_lock;
+	struct list_head	qobj_list;
+	wait_queue_head_t	thread_wq;
+} ____cacheline_aligned;
+
+/*
+ * One is used per struct se_cmd to hold all extra struct se_task
+ * metadata.  This structure is set up and allocated in
+ * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
+ */
+struct se_transport_task {
+	unsigned char		*t_task_cdb;
+	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned long long	t_task_lba;
+	int			t_tasks_failed;
+	int			t_tasks_fua;
+	int			t_tasks_bidi:1;
+	u32			t_task_cdbs;
+	u32			t_tasks_check;
+	u32			t_tasks_no;
+	u32			t_tasks_sectors;
+	u32			t_tasks_se_num;
+	u32			t_tasks_se_bidi_num;
+	u32			t_tasks_sg_chained_no;
+	atomic_t		t_fe_count;
+	atomic_t		t_se_count;
+	atomic_t		t_task_cdbs_left;
+	atomic_t		t_task_cdbs_ex_left;
+	atomic_t		t_task_cdbs_timeout_left;
+	atomic_t		t_task_cdbs_sent;
+	atomic_t		t_transport_aborted;
+	atomic_t		t_transport_active;
+	atomic_t		t_transport_complete;
+	atomic_t		t_transport_queue_active;
+	atomic_t		t_transport_sent;
+	atomic_t		t_transport_stop;
+	atomic_t		t_transport_timeout;
+	atomic_t		transport_dev_active;
+	atomic_t		transport_lun_active;
+	atomic_t		transport_lun_fe_stop;
+	atomic_t		transport_lun_stop;
+	spinlock_t		t_state_lock;
+	struct completion	t_transport_stop_comp;
+	struct completion	transport_lun_fe_stop_comp;
+	struct completion	transport_lun_stop_comp;
+	struct scatterlist	*t_tasks_sg_chained;
+	struct scatterlist	t_tasks_sg_bounce;
+	void			*t_task_buf;
+	/*
+	 * Used for pre-registered fabric SGL passthrough WRITE and READ
+	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
+	 * and other HW target mode fabric modules.
+	 */
+	struct scatterlist	*t_task_pt_sgl;
+	struct list_head	*t_mem_list;
+	/* Used for BIDI READ */
+	struct list_head	*t_mem_bidi_list;
+	struct list_head	t_task_list;
+} ____cacheline_aligned;
+
+struct se_task {
+	unsigned char	task_sense;
+	struct scatterlist *task_sg;
+	struct scatterlist *task_sg_bidi;
+	u8		task_scsi_status;
+	u8		task_flags;
+	int		task_error_status;
+	int		task_state_flags;
+	int		task_padded_sg:1;
+	unsigned long long	task_lba;
+	u32		task_no;
+	u32		task_sectors;
+	u32		task_size;
+	u32		task_sg_num;
+	u32		task_sg_offset;
+	enum dma_data_direction	task_data_direction;
+	struct se_cmd *task_se_cmd;
+	struct se_device	*se_dev;
+	struct completion	task_stop_comp;
+	atomic_t	task_active;
+	atomic_t	task_execute_queue;
+	atomic_t	task_timeout;
+	atomic_t	task_sent;
+	atomic_t	task_stop;
+	atomic_t	task_state_active;
+	struct timer_list	task_timer;
+	struct se_device *se_obj_ptr;
+	struct list_head t_list;
+	struct list_head t_execute_list;
+	struct list_head t_state_list;
+} ____cacheline_aligned;
+
+#define TASK_CMD(task)	((struct se_cmd *)task->task_se_cmd)
+#define TASK_DEV(task)	((struct se_device *)task->se_dev)
+
+struct se_cmd {
+	/* SAM response code being sent to initiator */
+	u8			scsi_status;
+	u8			scsi_asc;
+	u8			scsi_ascq;
+	u8			scsi_sense_reason;
+	u16			scsi_sense_length;
+	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
+	int			alua_nonop_delay;
+	/* See include/linux/dma-mapping.h */
+	enum dma_data_direction	data_direction;
+	/* For SAM Task Attribute */
+	int			sam_task_attr;
+	/* Transport protocol dependent state, see transport_state_table */
+	enum transport_state_table t_state;
+	/* Transport protocol dependent state for out of order CmdSNs */
+	int			deferred_t_state;
+	/* Transport specific error status */
+	int			transport_error_status;
+	/* See se_cmd_flags_table */
+	u32			se_cmd_flags;
+	u32			se_ordered_id;
+	/* Total size in bytes associated with command */
+	u32			data_length;
+	/* SCSI Presented Data Transfer Length */
+	u32			cmd_spdtl;
+	u32			residual_count;
+	u32			orig_fe_lun;
+	/* Persistent Reservation key */
+	u64			pr_res_key;
+	atomic_t                transport_sent;
+	/* Used for sense data */
+	void			*sense_buffer;
+	struct list_head	se_delayed_list;
+	struct list_head	se_ordered_list;
+	struct list_head	se_lun_list;
+	struct se_device      *se_dev;
+	struct se_dev_entry   *se_deve;
+	struct se_device	*se_obj_ptr;
+	struct se_device	*se_orig_obj_ptr;
+	struct se_lun		*se_lun;
+	/* Only used for internal passthrough and legacy TCM fabric modules */
+	struct se_session	*se_sess;
+	struct se_tmr_req	*se_tmr_req;
+	/* t_task is set up to point to t_task_backstore in transport_init_se_cmd() */
+	struct se_transport_task *t_task;
+	struct se_transport_task t_task_backstore;
+	struct target_core_fabric_ops *se_tfo;
+	int (*transport_emulate_cdb)(struct se_cmd *);
+	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
+	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
+	void (*transport_complete_callback)(struct se_cmd *);
+} ____cacheline_aligned;
+
+#define T_TASK(cmd)     ((struct se_transport_task *)(cmd->t_task))
+#define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo)
+
+struct se_tmr_req {
+	/* Task Management function to be performed */
+	u8			function;
+	/* Task Management response to send */
+	u8			response;
+	int			call_transport;
+	/* Reference to the ITT that Task Mgmt should be performed on */
+	u32			ref_task_tag;
+	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
+	u64			ref_task_lun;
+	void 			*fabric_tmr_ptr;
+	struct se_cmd		*task_cmd;
+	struct se_cmd		*ref_cmd;
+	struct se_device	*tmr_dev;
+	struct se_lun		*tmr_lun;
+	struct list_head	tmr_list;
+} ____cacheline_aligned;
+
+struct se_ua {
+	u8			ua_asc;
+	u8			ua_ascq;
+	struct se_node_acl	*ua_nacl;
+	struct list_head	ua_dev_list;
+	struct list_head	ua_nacl_list;
+} ____cacheline_aligned;
+
+struct se_node_acl {
+	char			initiatorname[TRANSPORT_IQN_LEN];
+	/* Used to signal demo mode created ACL, disabled by default */
+	int			dynamic_node_acl:1;
+	u32			queue_depth;
+	u32			acl_index;
+	u64			num_cmds;
+	u64			read_bytes;
+	u64			write_bytes;
+	spinlock_t		stats_lock;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		acl_pr_ref_count;
+	/* Used for MIB access */
+	atomic_t		mib_ref_count;
+	struct se_dev_entry	*device_list;
+	struct se_session	*nacl_sess;
+	struct se_portal_group *se_tpg;
+	spinlock_t		device_list_lock;
+	spinlock_t		nacl_sess_lock;
+	struct config_group	acl_group;
+	struct config_group	acl_attrib_group;
+	struct config_group	acl_auth_group;
+	struct config_group	acl_param_group;
+	struct config_group	*acl_default_groups[4];
+	struct list_head	acl_list;
+	struct list_head	acl_sess_list;
+} ____cacheline_aligned;
+
+struct se_session {
+	/* Used for MIB access */
+	atomic_t		mib_ref_count;
+	u64			sess_bin_isid;
+	struct se_node_acl	*se_node_acl;
+	struct se_portal_group *se_tpg;
+	void			*fabric_sess_ptr;
+	struct list_head	sess_list;
+	struct list_head	sess_acl_list;
+} ____cacheline_aligned;
+
+#define SE_SESS(cmd)		((struct se_session *)(cmd)->se_sess)
+#define SE_NODE_ACL(sess)	((struct se_node_acl *)(sess)->se_node_acl)
+
+struct se_device;
+struct se_transform_info;
+struct scatterlist;
+
+struct se_lun_acl {
+	char			initiatorname[TRANSPORT_IQN_LEN];
+	u32			mapped_lun;
+	struct se_node_acl	*se_lun_nacl;
+	struct se_lun		*se_lun;
+	struct list_head	lacl_list;
+	struct config_group	se_lun_group;
+}  ____cacheline_aligned;
+
+struct se_dev_entry {
+	int			def_pr_registered:1;
+	/* See transport_lunflags_table */
+	u32			lun_flags;
+	u32			deve_cmds;
+	u32			mapped_lun;
+	u32			average_bytes;
+	u32			last_byte_count;
+	u32			total_cmds;
+	u32			total_bytes;
+	u64			pr_res_key;
+	u64			creation_time;
+	u32			attach_count;
+	u64			read_bytes;
+	u64			write_bytes;
+	atomic_t		ua_count;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		pr_ref_count;
+	struct se_lun_acl	*se_lun_acl;
+	spinlock_t		ua_lock;
+	struct se_lun		*se_lun;
+	struct list_head	alua_port_list;
+	struct list_head	ua_list;
+}  ____cacheline_aligned;
+
+struct se_dev_limits {
+	/* Max supported HW queue depth */
+	u32		hw_queue_depth;
+	/* Max supported virtual queue depth */
+	u32		queue_depth;
+	/* From include/linux/blkdev.h for the other HW/SW limits. */
+	struct queue_limits limits;
+} ____cacheline_aligned;
+
+struct se_dev_attrib {
+	int		emulate_dpo;
+	int		emulate_fua_write;
+	int		emulate_fua_read;
+	int		emulate_write_cache;
+	int		emulate_ua_intlck_ctrl;
+	int		emulate_tas;
+	int		emulate_tpu;
+	int		emulate_tpws;
+	int		emulate_reservations;
+	int		emulate_alua;
+	int		enforce_pr_isids;
+	u32		hw_block_size;
+	u32		block_size;
+	u32		hw_max_sectors;
+	u32		max_sectors;
+	u32		optimal_sectors;
+	u32		hw_queue_depth;
+	u32		queue_depth;
+	u32		task_timeout;
+	u32		max_unmap_lba_count;
+	u32		max_unmap_block_desc_count;
+	u32		unmap_granularity;
+	u32		unmap_granularity_alignment;
+	struct se_subsystem_dev *da_sub_dev;
+	struct config_group da_group;
+} ____cacheline_aligned;
+
+struct se_subsystem_dev {
+/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
+#define SE_DEV_ALIAS_LEN		512
+	unsigned char	se_dev_alias[SE_DEV_ALIAS_LEN];
+/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
+#define SE_UDEV_PATH_LEN		512
+	unsigned char	se_dev_udev_path[SE_UDEV_PATH_LEN];
+	u32		su_dev_flags;
+	struct se_hba *se_dev_hba;
+	struct se_device *se_dev_ptr;
+	struct se_dev_attrib se_dev_attrib;
+	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
+	struct t10_alua	t10_alua;
+	/* T10 Inquiry and VPD WWN Information */
+	struct t10_wwn	t10_wwn;
+	/* T10 SPC-2 + SPC-3 Reservations */
+	struct t10_reservation_template t10_reservation;
+	spinlock_t      se_dev_lock;
+	void            *se_dev_su_ptr;
+	struct list_head g_se_dev_list;
+	struct config_group se_dev_group;
+	/* For T10 Reservations */
+	struct config_group se_dev_pr_group;
+} ____cacheline_aligned;
+
+#define T10_ALUA(su_dev)	(&(su_dev)->t10_alua)
+#define T10_RES(su_dev)		(&(su_dev)->t10_reservation)
+#define T10_PR_OPS(su_dev)	(&(su_dev)->t10_reservation.pr_ops)
+
+struct se_device {
+	/* Set to 1 if thread is NOT sleeping on thread_sem */
+	u8			thread_active;
+	u8			dev_status_timer_flags;
+	/* RELATIVE TARGET PORT IDENTIFIER Counter */
+	u16			dev_rpti_counter;
+	/* Used for SAM Task Attribute ordering */
+	u32			dev_cur_ordered_id;
+	u32			dev_flags;
+	u32			dev_port_count;
+	/* See transport_device_status_table */
+	u32			dev_status;
+	u32			dev_tcq_window_closed;
+	/* Physical device queue depth */
+	u32			queue_depth;
+	/* Used for SPC-2 reservations enforce of ISIDs */
+	u64			dev_res_bin_isid;
+	t10_task_attr_index_t	dev_task_attr_type;
+	/* Pointer to transport specific device structure */
+	void 			*dev_ptr;
+	u32			dev_index;
+	u64			creation_time;
+	u32			num_resets;
+	u64			num_cmds;
+	u64			read_bytes;
+	u64			write_bytes;
+	spinlock_t		stats_lock;
+	/* Active commands on this virtual SE device */
+	atomic_t		active_cmds;
+	atomic_t		simple_cmds;
+	atomic_t		depth_left;
+	atomic_t		dev_ordered_id;
+	atomic_t		dev_tur_active;
+	atomic_t		execute_tasks;
+	atomic_t		dev_status_thr_count;
+	atomic_t		dev_hoq_count;
+	atomic_t		dev_ordered_sync;
+	struct se_obj		dev_obj;
+	struct se_obj		dev_access_obj;
+	struct se_obj		dev_export_obj;
+	struct se_queue_obj	*dev_queue_obj;
+	struct se_queue_obj	*dev_status_queue_obj;
+	spinlock_t		delayed_cmd_lock;
+	spinlock_t		ordered_cmd_lock;
+	spinlock_t		execute_task_lock;
+	spinlock_t		state_task_lock;
+	spinlock_t		dev_alua_lock;
+	spinlock_t		dev_reservation_lock;
+	spinlock_t		dev_state_lock;
+	spinlock_t		dev_status_lock;
+	spinlock_t		dev_status_thr_lock;
+	spinlock_t		se_port_lock;
+	spinlock_t		se_tmr_lock;
+	/* Used for legacy SPC-2 reservations */
+	struct se_node_acl	*dev_reserved_node_acl;
+	/* Used for ALUA Logical Unit Group membership */
+	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
+	/* Used for SPC-3 Persistent Reservations */
+	struct t10_pr_registration *dev_pr_res_holder;
+	struct list_head	dev_sep_list;
+	struct list_head	dev_tmr_list;
+	struct timer_list	dev_status_timer;
+	/* Pointer to descriptor for processing thread */
+	struct task_struct	*process_thread;
+	pid_t			process_thread_pid;
+	struct task_struct		*dev_mgmt_thread;
+	struct list_head	delayed_cmd_list;
+	struct list_head	ordered_cmd_list;
+	struct list_head	execute_task_list;
+	struct list_head	state_task_list;
+	/* Pointer to associated SE HBA */
+	struct se_hba		*se_hba;
+	struct se_subsystem_dev *se_sub_dev;
+	/* Pointer to template of function pointers for transport */
+	struct se_subsystem_api *transport;
+	/* Linked list for struct se_hba struct se_device list */
+	struct list_head	dev_list;
+	/* Linked list for struct se_global->g_se_dev_list */
+	struct list_head	g_se_dev_list;
+}  ____cacheline_aligned;
+
+#define SE_DEV(cmd)		((struct se_device *)(cmd)->se_lun->lun_se_dev)
+#define SU_DEV(dev)		((struct se_subsystem_dev *)(dev)->se_sub_dev)
+#define DEV_ATTRIB(dev)		(&(dev)->se_sub_dev->se_dev_attrib)
+#define DEV_T10_WWN(dev)	(&(dev)->se_sub_dev->t10_wwn)
+
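+/*
+ * Hypothetical example of the accessor macros above; equivalent to
+ * dev->se_sub_dev->se_dev_attrib.block_size.
+ */
+static inline u32 tcm_example_block_size(struct se_device *dev)
+{
+	return DEV_ATTRIB(dev)->block_size;
+}
+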
+struct se_hba {
+	u16			hba_tpgt;
+	u32			hba_id;
+	/* See hba_flags_table */
+	u32			hba_flags;
+	/* Virtual iSCSI devices attached. */
+	u32			dev_count;
+	u32			hba_index;
+	atomic_t		dev_mib_access_count;
+	atomic_t		load_balance_queue;
+	atomic_t		left_queue_depth;
+	/* Maximum queue depth the HBA can handle. */
+	atomic_t		max_queue_depth;
+	/* Pointer to transport specific host structure. */
+	void			*hba_ptr;
+	/* Linked list for struct se_device */
+	struct list_head	hba_dev_list;
+	struct list_head	hba_list;
+	spinlock_t		device_lock;
+	spinlock_t		hba_queue_lock;
+	struct config_group	hba_group;
+	struct mutex		hba_access_mutex;
+	struct se_subsystem_api *transport;
+}  ____cacheline_aligned;
+
+#define SE_HBA(d)		((struct se_hba *)(d)->se_hba)
+
+struct se_lun {
+	/* See transport_lun_status_table */
+	enum transport_lun_status_table lun_status;
+	u32			lun_access;
+	u32			lun_flags;
+	u32			unpacked_lun;
+	atomic_t		lun_acl_count;
+	spinlock_t		lun_acl_lock;
+	spinlock_t		lun_cmd_lock;
+	spinlock_t		lun_sep_lock;
+	struct completion	lun_shutdown_comp;
+	struct list_head	lun_cmd_list;
+	struct list_head	lun_acl_list;
+	struct se_device	*lun_se_dev;
+	struct config_group	lun_group;
+	struct se_port	*lun_sep;
+} ____cacheline_aligned;
+
+#define SE_LUN(c)		((struct se_lun *)(c)->se_lun)
+
+struct se_port {
+	/* RELATIVE TARGET PORT IDENTIFIER */
+	u16		sep_rtpi;
+	int		sep_tg_pt_secondary_stat;
+	int		sep_tg_pt_secondary_write_md;
+	u32		sep_index;
+	struct scsi_port_stats sep_stats;
+	/* Used for ALUA Target Port Groups membership */
+	atomic_t	sep_tg_pt_gp_active;
+	atomic_t	sep_tg_pt_secondary_offline;
+	/* Used for PR ALL_TG_PT=1 */
+	atomic_t	sep_tg_pt_ref_cnt;
+	spinlock_t	sep_alua_lock;
+	struct mutex	sep_tg_pt_md_mutex;
+	struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
+	struct se_lun *sep_lun;
+	struct se_portal_group *sep_tpg;
+	struct list_head sep_alua_list;
+	struct list_head sep_list;
+} ____cacheline_aligned;
+
+struct se_tpg_np {
+	struct config_group	tpg_np_group;
+} ____cacheline_aligned;
+
+struct se_portal_group {
+	/* Type of target portal group, see transport_tpg_type_table */
+	enum transport_tpg_type_table se_tpg_type;
+	/* Number of ACLed Initiator Nodes for this TPG */
+	u32			num_node_acls;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		tpg_pr_ref_count;
+	/* Spinlock for adding/removing ACLed Nodes */
+	spinlock_t		acl_node_lock;
+	/* Spinlock for adding/removing sessions */
+	spinlock_t		session_lock;
+	spinlock_t		tpg_lun_lock;
+	/* Pointer to $FABRIC_MOD portal group */
+	void			*se_tpg_fabric_ptr;
+	struct list_head	se_tpg_list;
+	/* linked list for initiator ACL list */
+	struct list_head	acl_node_list;
+	struct se_lun		*tpg_lun_list;
+	struct se_lun		tpg_virt_lun0;
+	/* List of TCM sessions associated with this TPG */
+	struct list_head	tpg_sess_list;
+	/* Pointer to $FABRIC_MOD dependent code */
+	struct target_core_fabric_ops *se_tpg_tfo;
+	struct se_wwn		*se_tpg_wwn;
+	struct config_group	tpg_group;
+	struct config_group	*tpg_default_groups[6];
+	struct config_group	tpg_lun_group;
+	struct config_group	tpg_np_group;
+	struct config_group	tpg_acl_group;
+	struct config_group	tpg_attrib_group;
+	struct config_group	tpg_param_group;
+} ____cacheline_aligned;
+
+#define TPG_TFO(se_tpg)	((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo)
+
+struct se_wwn {
+	struct target_fabric_configfs *wwn_tf;
+	struct config_group	wwn_group;
+} ____cacheline_aligned;
+
+struct se_global {
+	u16			alua_lu_gps_counter;
+	int			g_sub_api_initialized;
+	u32			in_shutdown;
+	u32			alua_lu_gps_count;
+	u32			g_hba_id_counter;
+	struct config_group	target_core_hbagroup;
+	struct config_group	alua_group;
+	struct config_group	alua_lu_gps_group;
+	struct list_head	g_lu_gps_list;
+	struct list_head	g_se_tpg_list;
+	struct list_head	g_hba_list;
+	struct list_head	g_se_dev_list;
+	struct se_hba		*g_lun0_hba;
+	struct se_subsystem_dev *g_lun0_su_dev;
+	struct se_device	*g_lun0_dev;
+	struct t10_alua_lu_gp	*default_lu_gp;
+	spinlock_t		g_device_lock;
+	spinlock_t		hba_lock;
+	spinlock_t		se_tpg_lock;
+	spinlock_t		lu_gps_lock;
+	spinlock_t		plugin_class_lock;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_BASE_H */
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
new file mode 100644
index 0000000..40e6e74
--- /dev/null
+++ b/include/target/target_core_configfs.h
@@ -0,0 +1,52 @@
+#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION
+
+#define TARGET_CORE_CONFIG_ROOT	"/sys/kernel/config"
+
+#define TARGET_CORE_NAME_MAX_LEN	64
+#define TARGET_FABRIC_NAME_SIZE		32
+
+extern struct target_fabric_configfs *target_fabric_configfs_init(
+				struct module *, const char *);
+extern void target_fabric_configfs_free(struct target_fabric_configfs *);
+extern int target_fabric_configfs_register(struct target_fabric_configfs *);
+extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
+
+struct target_fabric_configfs_template {
+	struct config_item_type tfc_discovery_cit;
+	struct config_item_type	tfc_wwn_cit;
+	struct config_item_type tfc_tpg_cit;
+	struct config_item_type tfc_tpg_base_cit;
+	struct config_item_type tfc_tpg_lun_cit;
+	struct config_item_type tfc_tpg_port_cit;
+	struct config_item_type tfc_tpg_np_cit;
+	struct config_item_type tfc_tpg_np_base_cit;
+	struct config_item_type tfc_tpg_attrib_cit;
+	struct config_item_type tfc_tpg_param_cit;
+	struct config_item_type tfc_tpg_nacl_cit;
+	struct config_item_type tfc_tpg_nacl_base_cit;
+	struct config_item_type tfc_tpg_nacl_attrib_cit;
+	struct config_item_type tfc_tpg_nacl_auth_cit;
+	struct config_item_type tfc_tpg_nacl_param_cit;
+	struct config_item_type tfc_tpg_mappedlun_cit;
+};
+
+struct target_fabric_configfs {
+	char			tf_name[TARGET_FABRIC_NAME_SIZE];
+	atomic_t		tf_access_cnt;
+	struct list_head	tf_list;
+	struct config_group	tf_group;
+	struct config_group	tf_disc_group;
+	struct config_group	*tf_default_groups[2];
+	/* Pointer to fabric's config_item */
+	struct config_item	*tf_fabric;
+	/* Passed from fabric modules */
+	struct config_item_type	*tf_fabric_cit;
+	/* Pointer to target core subsystem */
+	struct configfs_subsystem *tf_subsys;
+	/* Pointer to fabric's struct module */
+	struct module *tf_module;
+	struct target_core_fabric_ops tf_ops;
+	struct target_fabric_configfs_template tf_cit_tmpl;
+};
+
+#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)
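+
+/*
+ * Hypothetical registration sketch for a fabric module using the API
+ * declared above; the fabric name "demo" and the error-handling
+ * convention are assumptions.
+ */
+static inline int tcm_example_register_fabric(struct module *mod,
+	struct target_fabric_configfs **out)
+{
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	tf = target_fabric_configfs_init(mod, "demo");
+	if (IS_ERR_OR_NULL(tf))
+		return -ENOMEM;
+
+	ret = target_fabric_configfs_register(tf);
+	if (ret < 0) {
+		target_fabric_configfs_free(tf);
+		return ret;
+	}
+	*out = tf;
+	return 0;
+}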
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
new file mode 100644
index 0000000..52b18a5
--- /dev/null
+++ b/include/target/target_core_device.h
@@ -0,0 +1,61 @@
+#ifndef TARGET_CORE_DEVICE_H
+#define TARGET_CORE_DEVICE_H
+
+extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
+extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern struct se_dev_entry *core_get_se_deve_from_rtpi(
+					struct se_node_acl *, u16);
+extern int core_free_device_list_for_node(struct se_node_acl *,
+					struct se_portal_group *);
+extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
+extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
+					u32, struct se_node_acl *,
+					struct se_portal_group *, int);
+extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+extern int core_dev_export(struct se_device *, struct se_portal_group *,
+					struct se_lun *);
+extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
+					struct se_lun *);
+extern int transport_core_report_lun_response(struct se_cmd *);
+extern void se_release_device_for_hba(struct se_device *);
+extern void se_release_vpd_for_dev(struct se_device *);
+extern void se_clear_dev_ports(struct se_device *);
+extern int se_free_virtual_device(struct se_device *, struct se_hba *);
+extern int se_dev_check_online(struct se_device *);
+extern int se_dev_check_shutdown(struct se_device *);
+extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+extern int se_dev_set_task_timeout(struct se_device *, u32);
+extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+extern int se_dev_set_unmap_granularity(struct se_device *, u32);
+extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+extern int se_dev_set_emulate_dpo(struct se_device *, int);
+extern int se_dev_set_emulate_fua_write(struct se_device *, int);
+extern int se_dev_set_emulate_fua_read(struct se_device *, int);
+extern int se_dev_set_emulate_write_cache(struct se_device *, int);
+extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+extern int se_dev_set_emulate_tas(struct se_device *, int);
+extern int se_dev_set_emulate_tpu(struct se_device *, int);
+extern int se_dev_set_emulate_tpws(struct se_device *, int);
+extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
+extern int se_dev_set_queue_depth(struct se_device *, u32);
+extern int se_dev_set_max_sectors(struct se_device *, u32);
+extern int se_dev_set_optimal_sectors(struct se_device *, u32);
+extern int se_dev_set_block_size(struct se_device *, u32);
+extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+					struct se_device *, u32);
+extern int core_dev_del_lun(struct se_portal_group *, u32);
+extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+							u32, char *, int *);
+extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun_acl *, u32, u32);
+extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun *, struct se_lun_acl *);
+extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun_acl *lacl);
+extern int core_dev_setup_virtual_lun0(void);
+extern void core_dev_release_virtual_lun0(void);
+
+#endif /* TARGET_CORE_DEVICE_H */
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h
new file mode 100644
index 0000000..a26fb75
--- /dev/null
+++ b/include/target/target_core_fabric_configfs.h
@@ -0,0 +1,106 @@
+/*
+ * Used for tfc_wwn_cit attributes
+ */
+
+#include <target/configfs_macros.h>
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_attrib, se_node_acl);
+#define TF_NACL_ATTRIB_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_attrib_attribute _fabric##_nacl_attrib_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_attrib_show_##_name,				\
+	_fabric##_nacl_attrib_store_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_auth, se_node_acl);
+#define TF_NACL_AUTH_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_auth_show_##_name,				\
+	_fabric##_nacl_auth_store_##_name);
+
+#define TF_NACL_AUTH_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_auth_show_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_param, se_node_acl);
+#define TF_NACL_PARAM_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_param_show_##_name,				\
+	_fabric##_nacl_param_store_##_name);
+
+#define TF_NACL_PARAM_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_param_show_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_base, se_node_acl);
+#define TF_NACL_BASE_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_show_##_name,					\
+	_fabric##_nacl_store_##_name);
+
+#define TF_NACL_BASE_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_show_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_np_base, se_tpg_np);
+#define TF_NP_BASE_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_np_base_attribute _fabric##_np_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_np_show_##_name,					\
+	_fabric##_np_store_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group);
+#define TF_TPG_ATTRIB_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_attrib_show_##_name,				\
+	_fabric##_tpg_attrib_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group);
+#define TF_TPG_PARAM_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_tpg_param_attribute _fabric##_tpg_param_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_param_show_##_name,				\
+	_fabric##_tpg_param_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg, se_portal_group);
+#define TF_TPG_BASE_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_tpg_attribute _fabric##_tpg_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_show_##_name,					\
+	_fabric##_tpg_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
+#define TF_WWN_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_wwn_show_attr_##_name,				\
+	_fabric##_wwn_store_attr_##_name);
+
+#define TF_WWN_ATTR_RO(_fabric, _name)					\
+static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_wwn_show_attr_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_discovery, target_fabric_configfs);
+#define TF_DISC_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_disc_show_##_name,					\
+	_fabric##_disc_store_##_name);
+
+#define TF_DISC_ATTR_RO(_fabric, _name)					\
+static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_disc_show_##_name);
+
+extern int target_fabric_setup_cits(struct target_fabric_configfs *);
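+
+/*
+ * Hypothetical use of TF_TPG_BASE_ATTR above for a fabric named "demo":
+ * the macro pastes the fabric prefix into the show/store names, so both
+ * functions must exist with exactly these names before it is invoked.
+ */
+static ssize_t demo_tpg_show_enable(struct se_portal_group *se_tpg,
+	char *page)
+{
+	return sprintf(page, "1\n");
+}
+
+static ssize_t demo_tpg_store_enable(struct se_portal_group *se_tpg,
+	const char *page, size_t count)
+{
+	return count;	/* a real fabric would parse and act on the value */
+}
+
+TF_TPG_BASE_ATTR(demo, enable, S_IRUGO | S_IWUSR);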
diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h
new file mode 100644
index 0000000..c2f8d0e
--- /dev/null
+++ b/include/target/target_core_fabric_lib.h
@@ -0,0 +1,28 @@
+#ifndef TARGET_CORE_FABRIC_LIB_H
+#define TARGET_CORE_FABRIC_LIB_H
+
+extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+#endif /* TARGET_CORE_FABRIC_LIB_H */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
new file mode 100644
index 0000000..f3ac12b
--- /dev/null
+++ b/include/target/target_core_fabric_ops.h
@@ -0,0 +1,100 @@
+/* Defined in target_core_configfs.h */
+struct target_fabric_configfs;
+
+struct target_core_fabric_ops {
+	struct configfs_subsystem *tf_subsys;
+	/*
+	 * Optional to signal struct se_task->task_sg[] padding entries
+	 * for scatterlist chaining using transport_do_task_sg_link(),
+	 * disabled by default
+	 */
+	int task_sg_chaining:1;
+	char *(*get_fabric_name)(void);
+	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
+	char *(*tpg_get_wwn)(struct se_portal_group *);
+	u16 (*tpg_get_tag)(struct se_portal_group *);
+	u32 (*tpg_get_default_depth)(struct se_portal_group *);
+	u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *,
+				unsigned char *);
+	u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *);
+	char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
+				const char *, u32 *, char **);
+	int (*tpg_check_demo_mode)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
+	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
+	struct se_node_acl *(*tpg_alloc_fabric_acl)(
+					struct se_portal_group *);
+	void (*tpg_release_fabric_acl)(struct se_portal_group *,
+					struct se_node_acl *);
+	u32 (*tpg_get_inst_index)(struct se_portal_group *);
+	/*
+	 * Optional function pointer for TCM to perform command map
+	 * from TCM processing thread context, for those struct se_cmd
+	 * initially allocated in interrupt context.
+	 */
+	int (*new_cmd_map)(struct se_cmd *);
+	/*
+	 * Optional function pointer for TCM fabric modules that use
+	 * Linux/NET sockets to allocate struct iovec array to struct se_cmd
+	 */
+	int (*alloc_cmd_iovecs)(struct se_cmd *);
+	/*
+	 * Optional to release struct se_cmd and fabric dependent allocated
+	 * I/O descriptor in transport_cmd_check_stop()
+	 */
+	void (*check_stop_free)(struct se_cmd *);
+	void (*release_cmd_to_pool)(struct se_cmd *);
+	void (*release_cmd_direct)(struct se_cmd *);
+	/*
+	 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+	 */
+	int (*shutdown_session)(struct se_session *);
+	void (*close_session)(struct se_session *);
+	void (*stop_session)(struct se_session *, int, int);
+	void (*fall_back_to_erl0)(struct se_session *);
+	int (*sess_logged_in)(struct se_session *);
+	u32 (*sess_get_index)(struct se_session *);
+	/*
+	 * Used only for SCSI fabrics that contain multi-value TransportIDs
+	 * (like iSCSI).  All other SCSI fabrics should set this to NULL.
+	 */
+	u32 (*sess_get_initiator_sid)(struct se_session *,
+				      unsigned char *, u32);
+	int (*write_pending)(struct se_cmd *);
+	int (*write_pending_status)(struct se_cmd *);
+	void (*set_default_node_attributes)(struct se_node_acl *);
+	u32 (*get_task_tag)(struct se_cmd *);
+	int (*get_cmd_state)(struct se_cmd *);
+	void (*new_cmd_failure)(struct se_cmd *);
+	int (*queue_data_in)(struct se_cmd *);
+	int (*queue_status)(struct se_cmd *);
+	int (*queue_tm_rsp)(struct se_cmd *);
+	u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
+	u16 (*get_fabric_sense_len)(void);
+	int (*is_state_remove)(struct se_cmd *);
+	u64 (*pack_lun)(unsigned int);
+	/*
+	 * fabric module calls for target_core_fabric_configfs.c
+	 */
+	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
+				struct config_group *, const char *);
+	void (*fabric_drop_wwn)(struct se_wwn *);
+	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
+				struct config_group *, const char *);
+	void (*fabric_drop_tpg)(struct se_portal_group *);
+	int (*fabric_post_link)(struct se_portal_group *,
+				struct se_lun *);
+	void (*fabric_pre_unlink)(struct se_portal_group *,
+				struct se_lun *);
+	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_np)(struct se_tpg_np *);
+	struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_nodeacl)(struct se_node_acl *);
+};
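+
+/*
+ * Hypothetical, partial ops table; a real fabric module must wire up
+ * every mandatory callback above before registering with TCM Core.
+ */
+static char *tcm_example_get_fabric_name(void)
+{
+	return "demo";
+}
+
+static struct target_core_fabric_ops tcm_example_ops = {
+	.get_fabric_name	= tcm_example_get_fabric_name,
+};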
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
new file mode 100644
index 0000000..6c8248b
--- /dev/null
+++ b/include/target/target_core_tmr.h
@@ -0,0 +1,43 @@
+#ifndef TARGET_CORE_TMR_H
+#define TARGET_CORE_TMR_H
+
+/* task management function values */
+#ifdef ABORT_TASK
+#undef ABORT_TASK
+#endif /* ABORT_TASK */
+#define ABORT_TASK				1
+#ifdef ABORT_TASK_SET
+#undef ABORT_TASK_SET
+#endif /* ABORT_TASK_SET */
+#define ABORT_TASK_SET				2
+#ifdef CLEAR_ACA
+#undef CLEAR_ACA
+#endif /* CLEAR_ACA */
+#define CLEAR_ACA				3
+#ifdef CLEAR_TASK_SET
+#undef CLEAR_TASK_SET
+#endif /* CLEAR_TASK_SET */
+#define CLEAR_TASK_SET				4
+#define LUN_RESET				5
+#define TARGET_WARM_RESET			6
+#define TARGET_COLD_RESET			7
+#define TASK_REASSIGN				8
+
+/* task management response values */
+#define TMR_FUNCTION_COMPLETE			0
+#define TMR_TASK_DOES_NOT_EXIST			1
+#define TMR_LUN_DOES_NOT_EXIST			2
+#define TMR_TASK_STILL_ALLEGIANT		3
+#define TMR_TASK_FAILOVER_NOT_SUPPORTED		4
+#define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	5
+#define TMR_FUNCTION_AUTHORIZATION_FAILED	6
+#define TMR_FUNCTION_REJECTED			255
+
+extern struct kmem_cache *se_tmr_req_cache;
+
+extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
+extern void core_tmr_release_req(struct se_tmr_req *);
+extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+				struct list_head *, struct se_cmd *);
+
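+/*
+ * Hypothetical use of the helpers above, e.g. while handling an
+ * incoming LUN_RESET; the error convention of core_tmr_alloc_req()
+ * is an assumption here.
+ */
+static inline int tcm_example_alloc_lun_reset(struct se_cmd *cmd,
+	void *fabric_tmr_ptr)
+{
+	struct se_tmr_req *tmr;
+
+	tmr = core_tmr_alloc_req(cmd, fabric_tmr_ptr, LUN_RESET);
+	if (IS_ERR(tmr))
+		return PTR_ERR(tmr);
+	cmd->se_tmr_req = tmr;
+	return 0;
+}
+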
+#endif /* TARGET_CORE_TMR_H */
diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h
new file mode 100644
index 0000000..77e1872
--- /dev/null
+++ b/include/target/target_core_tpg.h
@@ -0,0 +1,35 @@
+#ifndef TARGET_CORE_TPG_H
+#define TARGET_CORE_TPG_H
+
+extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+						const char *);
+extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+						unsigned char *);
+extern void core_tpg_add_node_to_devs(struct se_node_acl *,
+						struct se_portal_group *);
+extern struct se_node_acl *core_tpg_check_initiator_node_acl(
+						struct se_portal_group *,
+						unsigned char *);
+extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
+extern void core_tpg_clear_object_luns(struct se_portal_group *);
+extern struct se_node_acl *core_tpg_add_initiator_node_acl(
+					struct se_portal_group *,
+					struct se_node_acl *,
+					const char *, u32);
+extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
+						struct se_node_acl *, int);
+extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
+						unsigned char *, u32, int);
+extern int core_tpg_register(struct target_core_fabric_ops *,
+					struct se_wwn *,
+					struct se_portal_group *, void *,
+					int);
+extern int core_tpg_deregister(struct se_portal_group *);
+extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
+				void *);
+extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
+extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+#endif /* TARGET_CORE_TPG_H */
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
new file mode 100644
index 0000000..66f44e5
--- /dev/null
+++ b/include/target/target_core_transport.h
@@ -0,0 +1,351 @@
+#ifndef TARGET_CORE_TRANSPORT_H
+#define TARGET_CORE_TRANSPORT_H
+
+#define TARGET_CORE_VERSION			TARGET_CORE_MOD_VERSION
+
+/* Attempts before moving from SHORT to LONG */
+#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3  /* In milliseconds */
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10 /* In milliseconds */
+
+#define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */
+
+#define PYX_TRANSPORT_SENT_TO_TRANSPORT		0
+#define PYX_TRANSPORT_WRITE_PENDING		1
+
+#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE	-1
+#define PYX_TRANSPORT_HBA_QUEUE_FULL		-2
+#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS	-3
+#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES	-4
+#define PYX_TRANSPORT_INVALID_CDB_FIELD		-5
+#define PYX_TRANSPORT_INVALID_PARAMETER_LIST	-6
+#define PYX_TRANSPORT_LU_COMM_FAILURE		-7
+#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE		-8
+#define PYX_TRANSPORT_WRITE_PROTECTED		-9
+#define PYX_TRANSPORT_TASK_TIMEOUT		-10
+#define PYX_TRANSPORT_RESERVATION_CONFLICT	-11
+#define PYX_TRANSPORT_ILLEGAL_REQUEST		-12
+#define PYX_TRANSPORT_USE_SENSE_REASON		-13
+
+#ifndef SAM_STAT_RESERVATION_CONFLICT
+#define SAM_STAT_RESERVATION_CONFLICT		0x18
+#endif
+
+#define TRANSPORT_PLUGIN_FREE			0
+#define TRANSPORT_PLUGIN_REGISTERED		1
+
+#define TRANSPORT_PLUGIN_PHBA_PDEV		1
+#define TRANSPORT_PLUGIN_VHBA_PDEV		2
+#define TRANSPORT_PLUGIN_VHBA_VDEV		3
+
+/* For SE OBJ Plugins, in seconds */
+#define TRANSPORT_TIMEOUT_TUR			10
+#define TRANSPORT_TIMEOUT_TYPE_DISK		60
+#define TRANSPORT_TIMEOUT_TYPE_ROM		120
+#define TRANSPORT_TIMEOUT_TYPE_TAPE		600
+#define TRANSPORT_TIMEOUT_TYPE_OTHER		300
+
+/* For se_task->task_state_flags */
+#define TSF_EXCEPTION_CLEARED			0x01
+
+/*
+ * struct se_subsystem_dev->su_dev_flags
+ */
+#define SDF_FIRMWARE_VPD_UNIT_SERIAL		0x00000001
+#define SDF_EMULATED_VPD_UNIT_SERIAL		0x00000002
+#define SDF_USING_UDEV_PATH			0x00000004
+#define SDF_USING_ALIAS				0x00000008
+
+/*
+ * struct se_device->dev_flags
+ */
+#define DF_READ_ONLY				0x00000001
+#define DF_SPC2_RESERVATIONS			0x00000002
+#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
+
+/* struct se_dev_attrib sanity values */
+/* 10 Minutes */
+#define DA_TASK_TIMEOUT_MAX			600
+/* Default max_unmap_lba_count */
+#define DA_MAX_UNMAP_LBA_COUNT			0
+/* Default max_unmap_block_desc_count */
+#define DA_MAX_UNMAP_BLOCK_DESC_COUNT		0
+/* Default unmap_granularity */
+#define DA_UNMAP_GRANULARITY_DEFAULT		0
+/* Default unmap_granularity_alignment */
+#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
+/* Emulation for Disable Page Out (DPO) */
+#define DA_EMULATE_DPO				0
+/* Emulation for Forced Unit Access WRITEs */
+#define DA_EMULATE_FUA_WRITE			1
+/* Emulation for Forced Unit Access READs */
+#define DA_EMULATE_FUA_READ			0
+/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
+#define DA_EMULATE_WRITE_CACHE			0
+/* Emulation for UNIT ATTENTION Interlock Control */
+#define DA_EMULATE_UA_INTLLCK_CTRL		0
+/* Emulation for TASK_ABORTED status (TAS) by default */
+#define DA_EMULATE_TAS				1
+/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
+#define DA_EMULATE_TPU				0
+/*
+ * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
+ * block/blk-lib.c:blkdev_issue_discard()
+ */
+#define DA_EMULATE_TPWS				0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_RESERVATIONS			0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_ALUA				0
+/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
+#define DA_ENFORCE_PR_ISIDS			1
+#define DA_STATUS_MAX_SECTORS_MIN		16
+#define DA_STATUS_MAX_SECTORS_MAX		8192
+
+#define SE_MODE_PAGE_BUF			512
+
+#define MOD_MAX_SECTORS(ms, bs)			(ms % (PAGE_SIZE / bs))
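+/*
+ * Example: with a 4096-byte PAGE_SIZE and bs = 512, PAGE_SIZE / bs == 8,
+ * so MOD_MAX_SECTORS(1030, 512) == 1030 % 8 == 6.
+ */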
+
+struct se_mem;
+struct se_subsystem_api;
+
+extern int init_se_global(void);
+extern void release_se_global(void);
+extern void transport_init_queue_obj(struct se_queue_obj *);
+extern int transport_subsystem_check_init(void);
+extern int transport_subsystem_register(struct se_subsystem_api *);
+extern void transport_subsystem_release(struct se_subsystem_api *);
+extern void transport_load_plugins(void);
+extern struct se_session *transport_init_session(void);
+extern void __transport_register_session(struct se_portal_group *,
+					struct se_node_acl *,
+					struct se_session *, void *);
+extern void transport_register_session(struct se_portal_group *,
+					struct se_node_acl *,
+					struct se_session *, void *);
+extern void transport_free_session(struct se_session *);
+extern void transport_deregister_session_configfs(struct se_session *);
+extern void transport_deregister_session(struct se_session *);
+extern void transport_cmd_finish_abort(struct se_cmd *, int);
+extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
+extern void transport_complete_sync_cache(struct se_cmd *, int);
+extern void transport_complete_task(struct se_task *, int);
+extern void transport_add_task_to_execute_queue(struct se_task *,
+						struct se_task *,
+						struct se_device *);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+extern void transport_dump_dev_state(struct se_device *, char *, int *);
+extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
+					unsigned long long, char *, int *);
+extern void transport_dump_vpd_proto_id(struct t10_vpd *,
+					unsigned char *, int);
+extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_assoc(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_ident_type(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_ident(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
+extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
+					struct se_subsystem_api *,
+					struct se_subsystem_dev *, u32,
+					void *, struct se_dev_limits *,
+					const char *, const char *);
+extern void transport_device_setup_cmd(struct se_cmd *);
+extern void transport_init_se_cmd(struct se_cmd *,
+					struct target_core_fabric_ops *,
+					struct se_session *, u32, int, int,
+					unsigned char *);
+extern void transport_free_se_cmd(struct se_cmd *);
+extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+extern int transport_generic_handle_cdb(struct se_cmd *);
+extern int transport_generic_handle_cdb_map(struct se_cmd *);
+extern int transport_generic_handle_data(struct se_cmd *);
+extern void transport_new_cmd_failure(struct se_cmd *);
+extern int transport_generic_handle_tmr(struct se_cmd *);
+extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
+extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
+extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
+				struct scatterlist *, u32);
+extern int transport_clear_lun_from_sessions(struct se_lun *);
+extern int transport_check_aborted_status(struct se_cmd *, int);
+extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+extern void transport_send_task_abort(struct se_cmd *);
+extern void transport_release_cmd_to_pool(struct se_cmd *);
+extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
+extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
+extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
+extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
+					void *, struct se_mem *,
+					struct se_mem **, u32 *, u32 *);
+extern void transport_do_task_sg_chain(struct se_cmd *);
+extern void transport_generic_process_write(struct se_cmd *);
+extern int transport_generic_do_tmr(struct se_cmd *);
+/* From target_core_alua.c */
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+
+/*
+ * Each struct se_transport_task can have any number of struct se_task's
+ * for the storage transport(s) to execute.
+ * Used primarily for splitting up CDBs that exceed the physical storage
+ * HBA's maximum sector count per task.
+ */
+struct se_mem {
+	struct page	*se_page;
+	u32		se_len;
+	u32		se_off;
+	struct list_head se_list;
+} ____cacheline_aligned;
+
+/*
+ * 	Each type of disk transport supported MUST have a template defined
+ *	within its .h file.
+ */
+struct se_subsystem_api {
+	/*
+	 * The Name. :-)
+	 */
+	char name[16];
+	/*
+	 * Transport Type.
+	 */
+	u8 transport_type;
+	/*
+	 * struct module for struct se_hba references
+	 */
+	struct module *owner;
+	/*
+	 * Used for global se_subsystem_api list_head
+	 */
+	struct list_head sub_api_list;
+	/*
+	 * For SCF_SCSI_NON_DATA_CDB
+	 */
+	int (*cdb_none)(struct se_task *);
+	/*
+	 * For SCF_SCSI_CONTROL_NONSG_IO_CDB
+	 */
+	int (*map_task_non_SG)(struct se_task *);
+	/*
+	 * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
+	 */
+	int (*map_task_SG)(struct se_task *);
+	/*
+	 * attach_hba():
+	 */
+	int (*attach_hba)(struct se_hba *, u32);
+	/*
+	 * detach_hba():
+	 */
+	void (*detach_hba)(struct se_hba *);
+	/*
+	 * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
+	 *		Linux/SCSI struct Scsi_Host passthrough
+	 */
+	int (*pmode_enable_hba)(struct se_hba *, unsigned long);
+	/*
+	 * allocate_virtdevice():
+	 */
+	void *(*allocate_virtdevice)(struct se_hba *, const char *);
+	/*
+	 * create_virtdevice(): Only for Virtual HBAs
+	 */
+	struct se_device *(*create_virtdevice)(struct se_hba *,
+				struct se_subsystem_dev *, void *);
+	/*
+	 * free_device():
+	 */
+	void (*free_device)(void *);
+
+	/*
+	 * dpo_emulated():
+	 */
+	int (*dpo_emulated)(struct se_device *);
+	/*
+	 * fua_write_emulated():
+	 */
+	int (*fua_write_emulated)(struct se_device *);
+	/*
+	 * fua_read_emulated():
+	 */
+	int (*fua_read_emulated)(struct se_device *);
+	/*
+	 * write_cache_emulated():
+	 */
+	int (*write_cache_emulated)(struct se_device *);
+	/*
+	 * transport_complete():
+	 *
+	 * Use transport_generic_complete() for the majority of DAS transport
+	 * drivers.  Provided as a convenience.
+	 */
+	int (*transport_complete)(struct se_task *task);
+	struct se_task *(*alloc_task)(struct se_cmd *);
+	/*
+	 * do_task():
+	 */
+	int (*do_task)(struct se_task *);
+	/*
+	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
+	 * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
+	 */
+	int (*do_discard)(struct se_device *, sector_t, u32);
+	/*
+	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
+	 * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
+	 */
+	void (*do_sync_cache)(struct se_task *);
+	/*
+	 * free_task():
+	 */
+	void (*free_task)(struct se_task *);
+	/*
+	 * check_configfs_dev_params():
+	 */
+	ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
+	/*
+	 * set_configfs_dev_params():
+	 */
+	ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
+						const char *, ssize_t);
+	/*
+	 * show_configfs_dev_params():
+	 */
+	ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
+						char *);
+	/*
+	 * get_cdb():
+	 */
+	unsigned char *(*get_cdb)(struct se_task *);
+	/*
+	 * get_device_rev():
+	 */
+	u32 (*get_device_rev)(struct se_device *);
+	/*
+	 * get_device_type():
+	 */
+	u32 (*get_device_type)(struct se_device *);
+	/*
+	 * Get the sector_t from a subsystem backstore.
+	 */
+	sector_t (*get_blocks)(struct se_device *);
+	/*
+	 * do_se_mem_map():
+	 */
+	int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
+				struct se_mem *, struct se_mem **, u32 *, u32 *);
+	/*
+	 * get_sense_buffer():
+	 */
+	unsigned char *(*get_sense_buffer)(struct se_task *);
+} ____cacheline_aligned;
+
+#define TRANSPORT(dev)		((dev)->transport)
+#define HBA_TRANSPORT(hba)	((hba)->transport)
+
+extern struct se_global *se_global;
+
+#endif /* TARGET_CORE_TRANSPORT_H */
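
To make the "template" idea above concrete, a backstore plugin would typically
instantiate the ops table once; a minimal sketch follows (every example_*
symbol is hypothetical, not from this patch):

static struct se_subsystem_api example_template = {
	.name			= "example",
	.owner			= THIS_MODULE,
	.attach_hba		= example_attach_hba,
	.detach_hba		= example_detach_hba,
	.allocate_virtdevice	= example_allocate_virtdevice,
	.create_virtdevice	= example_create_virtdevice,
	.free_device		= example_free_device,
	.alloc_task		= example_alloc_task,
	.do_task		= example_do_task,
	.free_task		= example_free_task,
	.get_device_rev		= example_get_device_rev,
	.get_device_type	= example_get_device_type,
	.get_blocks		= example_get_blocks,
};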
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index c7bb2f0..c6bae36 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -1,5 +1,15 @@
+/*
+ * Because linux/module.h has tracepoints in the header, and ftrace.h
+ * eventually includes this file, define_trace.h includes linux/module.h.
+ * But we do not want module.h to override the TRACE_SYSTEM macro
+ * variable that define_trace.h is processing, so we only set it
+ * when module events are being processed, which happens when
+ * CREATE_TRACE_POINTS is defined.
+ */
+#ifdef CREATE_TRACE_POINTS
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM module
+#endif
 
 #if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_MODULE_H
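
For context, the guard above exists because of the standard tracepoint build
convention: exactly one .c file defines CREATE_TRACE_POINTS before including
the event header, which causes define_trace.h to emit the tracepoint
definitions. A sketch of that convention:

/* In the single .c file that instantiates the module tracepoints: */
#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

/*
 * All other includers omit CREATE_TRACE_POINTS and get only the
 * declarations, so TRACE_SYSTEM must not be redefined for them.
 */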
diff --git a/init/Kconfig b/init/Kconfig
index 4f6cdbf..4e33790 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -515,21 +515,6 @@
 
 	  Accept the default if unsure.
 
-config SRCU_SYNCHRONIZE_DELAY
-	int "Microseconds to delay before waiting for readers"
-	range 0 20
-	default 10
-	help
-	  This option controls how long SRCU delays before entering its
-	  loop waiting on SRCU readers.  The purpose of this loop is
-	  to avoid the unconditional context-switch penalty that would
-	  otherwise be incurred if there was an active SRCU reader,
-	  in a manner similar to adaptive locking schemes.  This should
-	  be set to be a bit longer than the common-case SRCU read-side
-	  critical-section overhead.
-
-	  Accept the default if unsure.
-
 endmenu # "RCU Subsystem"
 
 config IKCONFIG
diff --git a/kernel/futex.c b/kernel/futex.c
index 5207563..b766d28 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -826,10 +826,9 @@
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
 	/*
-	 * This happens when we have stolen the lock and the original
-	 * pending owner did not enqueue itself back on the rt_mutex.
-	 * Thats not a tragedy. We know that way, that a lock waiter
-	 * is on the fly. We make the futex_q waiter the pending owner.
+	 * It is possible that the next waiter (the one that brought
+	 * this owner to the kernel) timed out and is no longer
+	 * waiting on the lock.
 	 */
 	if (!new_owner)
 		new_owner = this->task;
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 0344937..0c343b9 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -189,7 +189,8 @@
 	unsigned long flags;
 
 	for (;;) {
-		wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);
+		wait_event_interruptible(rcu_kthread_wq,
+					 have_rcu_kthread_work != 0);
 		morework = rcu_boost();
 		local_irq_save(flags);
 		work = have_rcu_kthread_work;
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 98d8c1e..73ce23f 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -156,6 +156,16 @@
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below) to allow SRCU readers to exit their read-side critical
+ * sections.  If there are still some readers after 10 microseconds,
+ * we repeatedly block for 1-millisecond time periods.  This approach
+ * has done well in testing, so there is no need for a config parameter.
+ */
+#define SYNCHRONIZE_SRCU_READER_DELAY 10
+
+/*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
 static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
@@ -207,11 +217,12 @@
 	 * will have finished executing.  We initially give readers
 	 * an arbitrarily chosen 10 microseconds to get out of their
 	 * SRCU read-side critical sections, then loop waiting 1/HZ
-	 * seconds per iteration.
+	 * seconds per iteration.  The 10-microsecond value has done
+	 * very well in testing.
 	 */
 
 	if (srcu_readers_active_idx(sp, idx))
-		udelay(CONFIG_SRCU_SYNCHRONIZE_DELAY);
+		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
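
The fixed 10-microsecond spin pays off only when read-side critical sections
are short; for reference, the window being waited out is the span between the
two calls in an ordinary SRCU reader. A generic sketch (my_srcu is
hypothetical, and init_srcu_struct(&my_srcu) is assumed done at setup):

static struct srcu_struct my_srcu;

static void example_reader(void)
{
	int idx;

	idx = srcu_read_lock(&my_srcu);
	/*
	 * Read-side critical section: if this typically finishes
	 * within SYNCHRONIZE_SRCU_READER_DELAY microseconds, the
	 * initial udelay() above usually suffices and the 1-jiffy
	 * sleep loop is never entered.
	 */
	srcu_read_unlock(&my_srcu, idx);
}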
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c50a034..6519cf62 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -113,7 +113,7 @@
  * @shift:	pointer to shift variable
  * @from:	frequency to convert from
  * @to:		frequency to convert to
- * @minsec:	guaranteed runtime conversion range in seconds
+ * @maxsec:	guaranteed runtime conversion range in seconds
  *
  * The function evaluates the shift/mult pair for the scaled math
  * operations of clocksources and clockevents.
@@ -122,7 +122,7 @@
  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
  * event @to is the counter frequency and @from is NSEC_PER_SEC.
  *
- * The @minsec conversion range argument controls the time frame in
+ * The @maxsec conversion range argument controls the time frame in
  * seconds which must be covered by the runtime conversion with the
  * calculated mult and shift factors. This guarantees that no 64bit
  * overflow happens when the input value of the conversion is
@@ -131,7 +131,7 @@
  * factors.
  */
 void
-clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
 {
 	u64 tmp;
 	u32 sft, sftacc= 32;
@@ -140,7 +140,7 @@
 	 * Calculate the shift factor which is limiting the conversion
 	 * range:
 	 */
-	tmp = ((u64)minsec * from) >> 32;
+	tmp = ((u64)maxsec * from) >> 32;
 	while (tmp) {
 		tmp >>=1;
 		sftacc--;
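
To ground the @minsec -> @maxsec rename: the computed pair is used for scaled
conversion at runtime, roughly as below, and @maxsec bounds the input so the
64-bit multiply cannot overflow (an illustrative sketch, not code from this
patch):

/*
 * e.g. cycles -> nanoseconds when @from is the counter frequency
 * and @to is NSEC_PER_SEC:
 */
static inline u64 example_scaled_convert(u64 delta, u32 mult, u32 shift)
{
	return (delta * mult) >> shift;
}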
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5536aaf..d27c756 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -49,7 +49,7 @@
 	u32	mult;
 };
 
-struct timekeeper timekeeper;
+static struct timekeeper timekeeper;
 
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
@@ -164,7 +164,7 @@
 /*
  * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
  */
-struct timespec raw_time;
+static struct timespec raw_time;
 
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index bac752f..b706529 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -23,9 +23,6 @@
 static int syscall_enter_define_fields(struct ftrace_event_call *call);
 static int syscall_exit_define_fields(struct ftrace_event_call *call);
 
-/* All syscall exit events have the same fields */
-static LIST_HEAD(syscall_exit_fields);
-
 static struct list_head *
 syscall_get_enter_fields(struct ftrace_event_call *call)
 {
@@ -34,34 +31,28 @@
 	return &entry->enter_fields;
 }
 
-static struct list_head *
-syscall_get_exit_fields(struct ftrace_event_call *call)
-{
-	return &syscall_exit_fields;
-}
-
 struct trace_event_functions enter_syscall_print_funcs = {
-	.trace                  = print_syscall_enter,
+	.trace		= print_syscall_enter,
 };
 
 struct trace_event_functions exit_syscall_print_funcs = {
-	.trace                  = print_syscall_exit,
+	.trace		= print_syscall_exit,
 };
 
 struct ftrace_event_class event_class_syscall_enter = {
-	.system			= "syscalls",
-	.reg			= syscall_enter_register,
-	.define_fields		= syscall_enter_define_fields,
-	.get_fields		= syscall_get_enter_fields,
-	.raw_init		= init_syscall_trace,
+	.system		= "syscalls",
+	.reg		= syscall_enter_register,
+	.define_fields	= syscall_enter_define_fields,
+	.get_fields	= syscall_get_enter_fields,
+	.raw_init	= init_syscall_trace,
 };
 
 struct ftrace_event_class event_class_syscall_exit = {
-	.system			= "syscalls",
-	.reg			= syscall_exit_register,
-	.define_fields		= syscall_exit_define_fields,
-	.get_fields		= syscall_get_exit_fields,
-	.raw_init		= init_syscall_trace,
+	.system		= "syscalls",
+	.reg		= syscall_exit_register,
+	.define_fields	= syscall_exit_define_fields,
+	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
+	.raw_init	= init_syscall_trace,
 };
 
 extern unsigned long __start_syscalls_metadata[];
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e92f047..321fc74 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -409,6 +409,7 @@
 	int ret;
 	struct memory_notify arg;
 
+	lock_memory_hotplug();
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
 	arg.status_change_nid = -1;
@@ -421,6 +422,7 @@
 	ret = notifier_to_errno(ret);
 	if (ret) {
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 	/*
@@ -445,6 +447,7 @@
 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 			nr_pages, pfn);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 
@@ -469,6 +472,7 @@
 
 	if (onlined_pages)
 		memory_notify(MEM_ONLINE, &arg);
+	unlock_memory_hotplug();
 
 	return 0;
 }
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index d030548..0369f5b 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -92,32 +92,29 @@
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
 		       pmd_t *pmdp)
 {
 	pmd_t pmd;
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-	BUG();
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmdp)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pmd_t pmd = pmd_mksplitting(*pmdp);
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 	/* tlb flush only to serialize against gup-fast */
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-	BUG();
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
diff --git a/mm/slab.c b/mm/slab.c
index 2640374..37961d1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -284,7 +284,7 @@
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define	CACHE_CACHE 0
 #define	SIZE_AC MAX_NUMNODES
 #define	SIZE_L3 (2 * MAX_NUMNODES)
@@ -4053,7 +4053,7 @@
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
  */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
 {
 	int tofree;
@@ -4317,7 +4317,7 @@
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
diff --git a/mm/slub.c b/mm/slub.c
index c7ef007..e15aa7f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3797,7 +3797,7 @@
 		}
 	}
 
-	down_read(&slub_lock);
+	lock_memory_hotplug();
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
@@ -3838,7 +3838,7 @@
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
 #endif
-	up_read(&slub_lock);
+	unlock_memory_hotplug();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d29..6da5dae 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,7 +1392,7 @@
 	ax25_cb *ax25;
 	int err = 0;
 
-	memset(fsa, 0, sizeof(fsa));
+	memset(fsa, 0, sizeof(*fsa));
 	lock_sock(sk);
 	ax25 = ax25_sk(sk);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 06d0e7b..54277df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5523,34 +5523,6 @@
 	}
 }
 
-/**
- *	dev_txq_stats_fold - fold tx_queues stats
- *	@dev: device to get statistics from
- *	@stats: struct rtnl_link_stats64 to hold results
- */
-void dev_txq_stats_fold(const struct net_device *dev,
-			struct rtnl_link_stats64 *stats)
-{
-	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
-	unsigned int i;
-	struct netdev_queue *txq;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		spin_lock_bh(&txq->_xmit_lock);
-		tx_bytes   += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
-		tx_dropped += txq->tx_dropped;
-		spin_unlock_bh(&txq->_xmit_lock);
-	}
-	if (tx_bytes || tx_packets || tx_dropped) {
-		stats->tx_bytes   = tx_bytes;
-		stats->tx_packets = tx_packets;
-		stats->tx_dropped = tx_dropped;
-	}
-}
-EXPORT_SYMBOL(dev_txq_stats_fold);
-
 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
  * fields in the same order, with only the type differing.
  */
@@ -5594,7 +5566,6 @@
 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
-		dev_txq_stats_fold(dev, storage);
 	}
 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
 	return storage;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 19d6c21..d31bb36 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -380,6 +380,8 @@
 	}
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(skb->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f9d7ac9..44d2b42 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -351,7 +351,7 @@
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *	for this Ethernet device
  * @txqs: The number of TX queues this device has.
- * @txqs: The number of RX queues this device has.
+ * @rxqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 94b5bf1..5f8d242 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -401,6 +401,9 @@
 		goto drop;
 	}
 
+	if (skb->pkt_type != PACKET_HOST)
+		goto drop;
+
 	skb_forward_csum(skb);
 
 	/*
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 99abfb5..97c5b21 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -19,13 +19,15 @@
 
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#endif
+#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
@@ -33,8 +35,10 @@
 {
 	u16 zone = NF_CT_DEFAULT_ZONE;
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	if (skb->nfct)
 		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+#endif
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge &&
@@ -56,9 +60,11 @@
 {
 	struct sk_buff *reasm;
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	/* Previously seen (loopback)?	*/
 	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
 		return NF_ACCEPT;
+#endif
 
 	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
 	/* queued */
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5cb8d30..2b7eef3 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -972,7 +972,8 @@
 free:
 	kfree_skb(skb2);
 out:
-	return err;
+	/* this avoids a loop in nfnetlink. */
+	return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 #ifdef CONFIG_NF_NAT_NEEDED
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index af9360d..84ce48e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@
 	struct net_device *dev;
 	struct Qdisc *slaves;
 	struct list_head master_list;
+	unsigned long	tx_bytes;
+	unsigned long	tx_packets;
+	unsigned long	tx_errors;
+	unsigned long	tx_dropped;
 };
 
 struct teql_sched_data
@@ -274,7 +278,6 @@
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
@@ -314,8 +317,8 @@
 					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
-					txq->tx_packets++;
-					txq->tx_bytes += length;
+					master->tx_packets++;
+					master->tx_bytes += length;
 					return NETDEV_TX_OK;
 				}
 				__netif_tx_unlock(slave_txq);
@@ -342,10 +345,10 @@
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
 	}
-	dev->stats.tx_errors++;
+	master->tx_errors++;
 
 drop:
-	txq->tx_dropped++;
+	master->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -398,6 +401,18 @@
 	return 0;
 }
 
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+						     struct rtnl_link_stats64 *stats)
+{
+	struct teql_master *m = netdev_priv(dev);
+
+	stats->tx_packets	= m->tx_packets;
+	stats->tx_bytes		= m->tx_bytes;
+	stats->tx_errors	= m->tx_errors;
+	stats->tx_dropped	= m->tx_dropped;
+	return stats;
+}
+
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
 {
 	struct teql_master *m = netdev_priv(dev);
@@ -422,6 +437,7 @@
 	.ndo_open	= teql_master_open,
 	.ndo_stop	= teql_master_close,
 	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats64 = teql_master_stats64,
 	.ndo_change_mtu	= teql_master_mtu,
 };
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 75ee993..9576f35 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -137,7 +137,7 @@
 		ms_usage = 13;
 		break;
 	default:
-		return EINVAL;;
+		return -EINVAL;
 	}
 	salt[0] = (ms_usage >> 0) & 0xff;
 	salt[1] = (ms_usage >> 8) & 0xff;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dec2a6f..bcdae78 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -67,7 +67,6 @@
 
 #define	RSI_HASHBITS	6
 #define	RSI_HASHMAX	(1<<RSI_HASHBITS)
-#define	RSI_HASHMASK	(RSI_HASHMAX-1)
 
 struct rsi {
 	struct cache_head	h;
@@ -319,7 +318,6 @@
 
 #define	RSC_HASHBITS	10
 #define	RSC_HASHMAX	(1<<RSC_HASHBITS)
-#define	RSC_HASHMASK	(RSC_HASHMAX-1)
 
 #define GSS_SEQ_WIN	128
 
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e433e75..72ad836 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -37,7 +37,7 @@
 
 #define	 RPCDBG_FACILITY RPCDBG_CACHE
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item);
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
 static void cache_revisit_request(struct cache_head *item);
 
 static void cache_init(struct cache_head *h)
@@ -128,6 +128,7 @@
 {
 	head->expiry_time = expiry;
 	head->last_refresh = seconds_since_boot();
+	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 	set_bit(CACHE_VALID, &head->flags);
 }
 
@@ -208,11 +209,36 @@
 		/* entry is valid */
 		if (test_bit(CACHE_NEGATIVE, &h->flags))
 			return -ENOENT;
-		else
+		else {
+			/*
+			 * In combination with the write barrier in
+			 * sunrpc_cache_update, this ensures that anyone
+			 * using the cache entry after this sees the
+			 * updated contents:
+			 */
+			smp_rmb();
 			return 0;
+		}
 	}
 }
 
+static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
+{
+	int rv;
+
+	write_lock(&detail->hash_lock);
+	rv = cache_is_valid(detail, h);
+	if (rv != -EAGAIN) {
+		write_unlock(&detail->hash_lock);
+		return rv;
+	}
+	set_bit(CACHE_NEGATIVE, &h->flags);
+	cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
+	write_unlock(&detail->hash_lock);
+	cache_fresh_unlocked(h, detail);
+	return -ENOENT;
+}
+
 /*
  * This is the generic cache management routine for all
  * the authentication caches.
@@ -251,14 +277,8 @@
 			case -EINVAL:
 				clear_bit(CACHE_PENDING, &h->flags);
 				cache_revisit_request(h);
-				if (rv == -EAGAIN) {
-					set_bit(CACHE_NEGATIVE, &h->flags);
-					cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
-					cache_fresh_unlocked(h, detail);
-					rv = -ENOENT;
-				}
+				rv = try_to_negate_entry(detail, h);
 				break;
-
 			case -EAGAIN:
 				clear_bit(CACHE_PENDING, &h->flags);
 				cache_revisit_request(h);
@@ -268,9 +288,11 @@
 	}
 
 	if (rv == -EAGAIN) {
-		cache_defer_req(rqstp, h);
-		if (!test_bit(CACHE_PENDING, &h->flags)) {
-			/* Request is not deferred */
+		if (!cache_defer_req(rqstp, h)) {
+			/*
+			 * Request was not deferred; handle it as best
+			 * we can ourselves:
+			 */
 			rv = cache_is_valid(detail, h);
 			if (rv == -EAGAIN)
 				rv = -ETIMEDOUT;
@@ -618,18 +640,19 @@
 		discard->revisit(discard, 1);
 }
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+/* Return true if and only if a deferred request is queued. */
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
 
 	if (req->thread_wait) {
 		cache_wait_req(req, item);
 		if (!test_bit(CACHE_PENDING, &item->flags))
-			return;
+			return false;
 	}
 	dreq = req->defer(req);
 	if (dreq == NULL)
-		return;
+		return false;
 	setup_deferral(dreq, item, 1);
 	if (!test_bit(CACHE_PENDING, &item->flags))
 		/* Bit could have been cleared before we managed to
@@ -638,6 +661,7 @@
 		cache_revisit_request(item);
 
 	cache_limit_defers();
+	return true;
 }
 
 static void cache_revisit_request(struct cache_head *item)
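
The smp_wmb()/smp_rmb() pair added above is the usual publish/consume pattern:
the writer makes the entry's contents visible before setting CACHE_VALID, and
the reader orders its check of CACHE_VALID before using the contents. A
generic sketch (example_entry and both functions are hypothetical):

struct example_entry {
	unsigned long	flags;
	int		contents;
};

static void example_publish(struct example_entry *e, int v)
{
	e->contents = v;
	smp_wmb();		/* order contents before the VALID bit */
	set_bit(CACHE_VALID, &e->flags);
}

static int example_read(struct example_entry *e, int *out)
{
	if (!test_bit(CACHE_VALID, &e->flags))
		return -EAGAIN;
	smp_rmb();		/* pairs with smp_wmb() in example_publish() */
	*out = e->contents;
	return 0;
}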
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 0e659c6..08e05a8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1001,6 +1001,7 @@
 	rqstp->rq_splice_ok = 1;
 	/* Will be turned off only when NFSv4 Sessions are used */
 	rqstp->rq_usedeferral = 1;
+	rqstp->rq_dropme = false;
 
 	/* Setup reply header */
 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1102,7 +1103,7 @@
 		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 
 		/* Encode reply */
-		if (*statp == rpc_drop_reply) {
+		if (rqstp->rq_dropme) {
 			if (procp->pc_release)
 				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
 			goto dropit;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3f2c555..ab86b79 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -13,6 +13,7 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
@@ -128,6 +129,9 @@
 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
 		svcauth_unix_info_release(xprt);
 	put_net(xprt->xpt_net);
+	/* See comment on corresponding get in xs_setup_bc_tcp(): */
+	if (xprt->xpt_bc_xprt)
+		xprt_put(xprt->xpt_bc_xprt);
 	xprt->xpt_ops->xpo_free(xprt);
 	module_put(owner);
 }
@@ -303,6 +307,15 @@
 	list_del(&rqstp->rq_list);
 }
 
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+		return true;
+	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+		return xprt->xpt_ops->xpo_has_wspace(xprt);
+	return false;
+}
+
 /*
  * Queue up a transport with data pending. If there are idle nfsd
  * processes, wake 'em up.
@@ -315,8 +328,7 @@
 	struct svc_rqst	*rqstp;
 	int cpu;
 
-	if (!(xprt->xpt_flags &
-	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+	if (!svc_xprt_has_something_to_do(xprt))
 		return;
 
 	cpu = get_cpu();
@@ -343,28 +355,7 @@
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
 		goto out_unlock;
 	}
-	BUG_ON(xprt->xpt_pool != NULL);
-	xprt->xpt_pool = pool;
 
-	/* Handle pending connection */
-	if (test_bit(XPT_CONN, &xprt->xpt_flags))
-		goto process;
-
-	/* Handle close in-progress */
-	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
-		goto process;
-
-	/* Check if we have space to reply to a request */
-	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
-		/* Don't enqueue while not enough space for reply */
-		dprintk("svc: no write space, transport %p  not enqueued\n",
-			xprt);
-		xprt->xpt_pool = NULL;
-		clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		goto out_unlock;
-	}
-
- process:
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
@@ -381,13 +372,11 @@
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		pool->sp_stats.threads_woken++;
-		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 		pool->sp_stats.sockets_queued++;
-		BUG_ON(xprt->xpt_pool != pool);
 	}
 
 out_unlock:
@@ -426,7 +415,6 @@
 void svc_xprt_received(struct svc_xprt *xprt)
 {
 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
-	xprt->xpt_pool = NULL;
 	/* As soon as we clear busy, the xprt could be closed and
 	 * 'put', so we need a reference to call svc_xprt_enqueue with:
 	 */
@@ -722,7 +710,10 @@
 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_xprt(xprt);
-	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+		/* Leave XPT_BUSY set on the dead xprt: */
+		goto out;
+	}
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
@@ -747,28 +738,23 @@
 			spin_unlock_bh(&serv->sv_lock);
 			svc_xprt_received(newxpt);
 		}
-		svc_xprt_received(xprt);
-	} else {
+	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
 			rqstp, pool->sp_id, xprt,
 			atomic_read(&xprt->xpt_ref.refcount));
 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
-		if (rqstp->rq_deferred) {
-			svc_xprt_received(xprt);
+		if (rqstp->rq_deferred)
 			len = svc_deferred_recv(rqstp);
-		} else {
+		else
 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
-			svc_xprt_received(xprt);
-		}
 		dprintk("svc: got len=%d\n", len);
 	}
+	svc_xprt_received(xprt);
 
 	/* No data, incomplete (TCP) read, or accept() */
-	if (len == 0 || len == -EAGAIN) {
-		rqstp->rq_res.len = 0;
-		svc_xprt_release(rqstp);
-		return -EAGAIN;
-	}
+	if (len == 0 || len == -EAGAIN)
+		goto out;
+
 	clear_bit(XPT_OLD, &xprt->xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -777,6 +763,10 @@
 	if (serv->sv_stats)
 		serv->sv_stats->netcnt++;
 	return len;
+out:
+	rqstp->rq_res.len = 0;
+	svc_xprt_release(rqstp);
+	return -EAGAIN;
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
@@ -935,7 +925,12 @@
 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
 		/* someone else will have to effect the close */
 		return;
-
+	/*
+	 * We expect svc_close_xprt() to work even when no threads are
+	 * running (e.g., while configuring the server before starting
+	 * any threads), so if the transport isn't busy, we delete
+	 * it ourselves:
+	 */
 	svc_delete_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
@@ -945,16 +940,16 @@
 	struct svc_xprt *xprt;
 	struct svc_xprt *tmp;
 
+	/*
+	 * The server is shutting down, and no more threads are running.
+	 * svc_xprt_enqueue() might still be running, but at worst it
+	 * will re-add the xprt to sp_sockets, which will soon get
+	 * freed.  So we don't bother with any more locking, and don't
+	 * leave the close to the (nonexistent) server threads:
+	 */
 	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
-			/* Waiting to be processed, but no threads left,
-			 * So just remove it from the waiting list
-			 */
-			list_del_init(&xprt->xpt_ready);
-			clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		}
-		svc_close_xprt(xprt);
+		svc_delete_xprt(xprt);
 	}
 }
 
@@ -1028,6 +1023,7 @@
 	}
 	svc_xprt_get(rqstp->rq_xprt);
 	dr->xprt = rqstp->rq_xprt;
+	rqstp->rq_dropme = true;
 
 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -1065,14 +1061,13 @@
 	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
 		return NULL;
 	spin_lock(&xprt->xpt_lock);
-	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	if (!list_empty(&xprt->xpt_deferred)) {
 		dr = list_entry(xprt->xpt_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
-	}
+	} else
+		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	spin_unlock(&xprt->xpt_lock);
 	return dr;
 }
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 4e9393c..7963569 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -118,7 +118,6 @@
 
 #define	DN_HASHBITS	6
 #define	DN_HASHMAX	(1<<DN_HASHBITS)
-#define	DN_HASHMASK	(DN_HASHMAX-1)
 
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
 static spinlock_t	auth_domain_lock =
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 560677d..30916b0 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -30,7 +30,9 @@
 
 struct unix_domain {
 	struct auth_domain	h;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int	addr_changes;
+#endif /* CONFIG_NFSD_DEPRECATED */
 	/* other stuff later */
 };
 
@@ -64,7 +66,9 @@
 			return NULL;
 		}
 		new->h.flavour = &svcauth_unix;
+#ifdef CONFIG_NFSD_DEPRECATED
 		new->addr_changes = 0;
+#endif /* CONFIG_NFSD_DEPRECATED */
 		rv = auth_domain_lookup(name, &new->h);
 	}
 }
@@ -85,14 +89,15 @@
  */
 #define	IP_HASHBITS	8
 #define	IP_HASHMAX	(1<<IP_HASHBITS)
-#define	IP_HASHMASK	(IP_HASHMAX-1)
 
 struct ip_map {
 	struct cache_head	h;
 	char			m_class[8]; /* e.g. "nfsd" */
 	struct in6_addr		m_addr;
 	struct unix_domain	*m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int			m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 };
 
 static void ip_map_put(struct kref *kref)
@@ -146,7 +151,9 @@
 
 	kref_get(&item->m_client->h.ref);
 	new->m_client = item->m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	new->m_add_change = item->m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 }
 static struct cache_head *ip_map_alloc(void)
 {
@@ -331,6 +338,7 @@
 	ip.h.flags = 0;
 	if (!udom)
 		set_bit(CACHE_NEGATIVE, &ip.h.flags);
+#ifdef CONFIG_NFSD_DEPRECATED
 	else {
 		ip.m_add_change = udom->addr_changes;
 		/* if this is from the legacy set_client system call,
@@ -339,6 +347,7 @@
 		if (expiry == NEVER)
 			ip.m_add_change++;
 	}
+#endif /* CONFIG_NFSD_DEPRECATED */
 	ip.h.expiry_time = expiry;
 	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
@@ -358,6 +367,7 @@
 	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
+#ifdef CONFIG_NFSD_DEPRECATED
 int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
 {
 	struct unix_domain *udom;
@@ -402,8 +412,7 @@
 		return NULL;
 
 	if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
-		if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
-			auth_domain_put(&ipm->m_client->h);
+		sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
 		rv = NULL;
 	} else {
 		rv = &ipm->m_client->h;
@@ -413,6 +422,7 @@
 	return rv;
 }
 EXPORT_SYMBOL_GPL(auth_unix_lookup);
+#endif /* CONFIG_NFSD_DEPRECATED */
 
 void svcauth_unix_purge(void)
 {
@@ -497,7 +507,6 @@
  */
 #define	GID_HASHBITS	8
 #define	GID_HASHMAX	(1<<GID_HASHBITS)
-#define	GID_HASHMASK	(GID_HASHMAX - 1)
 
 struct unix_gid {
 	struct cache_head	h;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d265aa7..7bd3bbb 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -331,19 +331,21 @@
 			len = onelen;
 			break;
 		}
-		if (toclose && strcmp(toclose, buf + len) == 0)
+		if (toclose && strcmp(toclose, buf + len) == 0) {
 			closesk = svsk;
-		else
+			svc_xprt_get(&closesk->sk_xprt);
+		} else
 			len += onelen;
 	}
 	spin_unlock_bh(&serv->sv_lock);
 
-	if (closesk)
+	if (closesk) {
 		/* Should unregister with portmap, but you cannot
 		 * unregister just one protocol...
 		 */
 		svc_close_xprt(&closesk->sk_xprt);
-	else if (toclose)
+		svc_xprt_put(&closesk->sk_xprt);
+	} else if (toclose)
 		return -ENOENT;
 	return len;
 }
@@ -992,15 +994,17 @@
 		vec[0] = rqstp->rq_arg.head[0];
 	} else {
 		/* REPLY */
-		if (svsk->sk_bc_xprt)
-			req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+		struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
+
+		if (bc_xprt)
+			req = xprt_lookup_rqst(bc_xprt, xid);
 
 		if (!req) {
 			printk(KERN_NOTICE
 				"%s: Got unrecognized reply: "
-				"calldir 0x%x sk_bc_xprt %p xid %08x\n",
+				"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
 				__func__, ntohl(calldir),
-				svsk->sk_bc_xprt, xid);
+				bc_xprt, xid);
 			vec[0] = rqstp->rq_arg.head[0];
 			goto out;
 		}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4c8f18a..856274d 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -965,6 +965,7 @@
 	xprt = kzalloc(size, GFP_KERNEL);
 	if (xprt == NULL)
 		goto out;
+	kref_init(&xprt->kref);
 
 	xprt->max_reqs = max_req;
 	xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1101,8 +1102,10 @@
 				-PTR_ERR(xprt));
 		return xprt;
 	}
+	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
+		/* ->setup returned a pre-initialized xprt: */
+		return xprt;
 
-	kref_init(&xprt->kref);
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 96549df..c431f5a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2359,6 +2359,15 @@
 	struct svc_sock *bc_sock;
 	struct rpc_xprt *ret;
 
+	if (args->bc_xprt->xpt_bc_xprt) {
+		/*
+		 * This server connection already has a backchannel
+		 * export; we can't create a new one, as we wouldn't be
+		 * able to match replies based on xid any more.  So,
+		 * reuse the already-existing one:
+		 */
+		return args->bc_xprt->xpt_bc_xprt;
+	}
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
 		return xprt;
@@ -2375,16 +2384,6 @@
 	xprt->reestablish_timeout = 0;
 	xprt->idle_timeout = 0;
 
-	/*
-	 * The backchannel uses the same socket connection as the
-	 * forechannel
-	 */
-	xprt->bc_xprt = args->bc_xprt;
-	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
-	bc_sock->sk_bc_xprt = xprt;
-	transport->sock = bc_sock->sk_sock;
-	transport->inet = bc_sock->sk_sk;
-
 	xprt->ops = &bc_tcp_ops;
 
 	switch (addr->sa_family) {
@@ -2407,6 +2406,20 @@
 			xprt->address_strings[RPC_DISPLAY_PROTO]);
 
 	/*
+	 * Once we've associated a backchannel xprt with a connection,
+	 * we want to keep it around as long as the connection
+	 * lasts, in case we need to start using it for a backchannel
+	 * again; this reference won't be dropped until bc_xprt is
+	 * destroyed.
+	 */
+	xprt_get(xprt);
+	args->bc_xprt->xpt_bc_xprt = xprt;
+	xprt->bc_xprt = args->bc_xprt;
+	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+	transport->sock = bc_sock->sk_sock;
+	transport->inet = bc_sock->sk_sk;
+
+	/*
 	 * Since we don't want connections for the backchannel, we set
 	 * the xprt status to connected
 	 */
@@ -2415,6 +2428,7 @@
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	xprt_put(xprt);
 	ret = ERR_PTR(-EINVAL);
 out_err:
 	xprt_free(xprt);
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 52462ae..e032716 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -61,6 +61,9 @@
 -r::
 --realtime=::
 	Collect data with this RT SCHED_FIFO priority.
+-D::
+--no-delay::
+	Collect data without buffering.
 -A::
 --append::
 	Append to the output file to do incremental profiling.
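
A brief usage note: --no-delay is mainly useful when watching events live,
since every event wakes the consumer instead of waiting for a buffer
watermark. An illustrative invocation:

	perf record -D -e sched:sched_switch -a sleep 1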
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7069bd3..df6064a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -49,6 +49,7 @@
 static const char		*output_name			= "perf.data";
 static int			group				=      0;
 static int			realtime_prio			=      0;
+static bool			nodelay				=  false;
 static bool			raw_samples			=  false;
 static bool			sample_id_all_avail		=   true;
 static bool			system_wide			=  false;
@@ -307,6 +308,11 @@
 		attr->sample_type	|= PERF_SAMPLE_CPU;
 	}
 
+	if (nodelay) {
+		attr->watermark = 0;
+		attr->wakeup_events = 1;
+	}
+
 	attr->mmap		= track;
 	attr->comm		= track;
 	attr->inherit		= !no_inherit;
@@ -331,9 +337,6 @@
 			else if (err ==  ENODEV && cpu_list) {
 				die("No such device - did you specify"
 					" an out-of-range profile CPU?\n");
-			} else if (err == ENOENT) {
-				die("%s event is not supported. ",
-				     event_name(evsel));
 			} else if (err == EINVAL && sample_id_all_avail) {
 				/*
 				 * Old kernel, no attr->sample_id_type_all field
@@ -480,6 +483,7 @@
 			process_buildids();
 		perf_header__write(&session->header, output, true);
 		perf_session__delete(session);
+		perf_evsel_list__delete();
 		symbol__exit();
 	}
 }
@@ -845,6 +849,8 @@
 		    "record events on existing thread id"),
 	OPT_INTEGER('r', "realtime", &realtime_prio,
 		    "collect data with this RT SCHED_FIFO priority"),
+	OPT_BOOLEAN('D', "no-delay", &nodelay,
+		    "collect data without buffering"),
 	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
 		    "collect raw sample records from all opened counters"),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index abd4b84..29e7ffd 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1843,15 +1843,15 @@
 	"-f",
 	"-m", "1024",
 	"-c", "1",
-	"-e", "sched:sched_switch:r",
-	"-e", "sched:sched_stat_wait:r",
-	"-e", "sched:sched_stat_sleep:r",
-	"-e", "sched:sched_stat_iowait:r",
-	"-e", "sched:sched_stat_runtime:r",
-	"-e", "sched:sched_process_exit:r",
-	"-e", "sched:sched_process_fork:r",
-	"-e", "sched:sched_wakeup:r",
-	"-e", "sched:sched_migrate_task:r",
+	"-e", "sched:sched_switch",
+	"-e", "sched:sched_stat_wait",
+	"-e", "sched:sched_stat_sleep",
+	"-e", "sched:sched_stat_iowait",
+	"-e", "sched:sched_stat_runtime",
+	"-e", "sched:sched_process_exit",
+	"-e", "sched:sched_process_fork",
+	"-e", "sched:sched_wakeup",
+	"-e", "sched:sched_migrate_task",
 };
 
 static int __cmd_record(int argc, const char **argv)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c385a63..0ff11d9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -743,6 +743,7 @@
 out_free_fd:
 	list_for_each_entry(pos, &evsel_list, node)
 		perf_evsel__free_stat_priv(pos);
+	perf_evsel_list__delete();
 out:
 	thread_map__delete(threads);
 	threads = NULL;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 6ce4042..05344c6 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1247,8 +1247,6 @@
 				die("Permission error - are you root?\n"
 					"\t Consider tweaking"
 					" /proc/sys/kernel/perf_event_paranoid.\n");
-			if (err == ENOENT)
-				die("%s event is not supported. ", event_name(evsel));
 			/*
 			 * If it's cycles then fall back to hrtimer
 			 * based cpu-clock-tick sw counter, which
@@ -1473,6 +1471,8 @@
 		pos->attr.sample_period = default_interval;
 	}
 
+	sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
+
 	symbol_conf.priv_size = (sizeof(struct sym_entry) +
 				 (nr_counters + 1) * sizeof(unsigned long));
 
@@ -1490,6 +1490,7 @@
 out_free_fd:
 	list_for_each_entry(pos, &evsel_list, node)
 		perf_evsel__free_mmap(pos);
+	perf_evsel_list__delete();
 
 	return status;
 }
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 5b1ecd6..595d0f4 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -286,8 +286,6 @@
 	status = p->fn(argc, argv, prefix);
 	exit_browser(status);
 
-	perf_evsel_list__delete();
-
 	if (status)
 		return status & 0xff;